[CVPR 2024] Reading the FinePOSE Source Code (Part 2): Training Loop, Data Pipeline, and Visualization

Preface: In Part 1 of this series we analyzed the core model code of FinePOSE (mixste_finepose.py). This post walks through the entry script main.py, which drives the whole project: how the dataset is loaded, how the prompts are constructed, and how training and testing actually run.

I. Data Loading and Preprocessing

This code reads all of the data into memory and performs the basic coordinate transformations.

1. Loading the 3D data and converting coordinates

# 1. Load the 3D dataset
print('Loading dataset...')
# Build the path to the 3D dataset file, usually a .npz archive
dataset_path = './data/data_3d_' + args.dataset + '.npz'

# Instantiate the dataset class selected by args.dataset
if args.dataset == 'h36m':
    from common.h36m_dataset import Human36mDataset
    # Load the Human3.6M dataset
    dataset = Human36mDataset(dataset_path)
elif args.dataset.startswith('humaneva'):
    from common.humaneva_dataset import HumanEvaDataset
    # Load the HumanEva dataset
    dataset = HumanEvaDataset(dataset_path)
elif args.dataset.startswith('custom'):
    from common.custom_dataset import CustomDataset
    # Load a custom dataset; it is initialized from the 2D data path as well
    dataset = CustomDataset('./data/data_2d_' + args.dataset + '_' + args.keypoints + '.npz')
else:
    raise KeyError('Invalid dataset') # unknown dataset name

# 2. Prepare the 3D data (coordinate-system conversion)
print('Preparing data...')
# Iterate over every subject in the dataset (e.g. S1, S5, S9...)
for subject in dataset.subjects():
    # Iterate over every action of that subject (e.g. Walking)
    for action in dataset[subject].keys():
        anim = dataset[subject][action]

        # If this action contains 3D positions ('positions' holds the raw data in world coordinates)
        if 'positions' in anim:
            positions_3d = []
            # Iterate over every camera view of this action
            for cam in anim['cameras']:
                # Key step: convert world coordinates into camera coordinates
                # using the extrinsics R (rotation matrix) and t (translation vector)
                pos_3d = world_to_camera(anim['positions'], R=cam['orientation'], t=cam['translation'])

                # Key step: remove the global offset
                # pos_3d[:, :1] is the root joint (usually the hip)
                # pos_3d[:, 1:] -= ... subtracts the root coordinates from every joint except the root,
                # centering the pose at the root (0, 0, 0). This removes the person's absolute position
                # in space and keeps only the relative pose.
                # Note: the slicing leaves the first column (the root trajectory) untouched, so it can
                # still be used later to recover the global trajectory.
                pos_3d[:, 1:] -= pos_3d[:, :1]

                positions_3d.append(pos_3d)
            # Store the camera-space 3D data back into the dictionary
            anim['positions_3d'] = positions_3d
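
To make the slicing above concrete, here is a tiny standalone NumPy sketch (not part of main.py; the toy values are made up) showing that pos_3d[:, 1:] -= pos_3d[:, :1] makes every joint root-relative while leaving the root's own camera-space trajectory in column 0:

import numpy as np

# Toy data: 2 frames, 3 joints, xyz coordinates (values chosen only for illustration)
pos_3d = np.array([
    [[1.0, 2.0, 3.0], [1.5, 2.5, 3.5], [0.5, 1.5, 2.5]],
    [[1.1, 2.1, 3.1], [1.6, 2.6, 3.6], [0.6, 1.6, 2.6]],
])

pos_3d[:, 1:] -= pos_3d[:, :1]   # broadcast the root joint (index 0) over joints 1..N-1

print(pos_3d[0, 0])  # [1. 2. 3.]      -> the root keeps its absolute (camera-space) position
print(pos_3d[0, 1])  # [0.5 0.5 0.5]   -> every other joint is now relative to the root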

2. Loading the 2D data and normalizing it

This part loads the pre-detected 2D keypoints (e.g. from CPN) and normalizes their screen coordinates.

# 3. Load the 2D detections
print('Loading 2D detections...')
# Load the pre-detected 2D keypoint file (usually produced by CPN, Hourglass, etc.)
keypoints = np.load('./data/data_2d_' + args.dataset + '_' + args.keypoints + '.npz', allow_pickle=True)

# Read the metadata, which contains the number of keypoints, symmetry information, etc.
keypoints_metadata = keypoints['metadata'].item()
keypoints_symmetry = keypoints_metadata['keypoints_symmetry']

# Indices of the left/right symmetric 2D keypoints (used later for augmentation such as horizontal flipping)
kps_left, kps_right = list(keypoints_symmetry[0]), list(keypoints_symmetry[1])

# Indices of the left/right joints as defined by the 3D skeleton
joints_left, joints_right = list(dataset.skeleton().joints_left()), list(dataset.skeleton().joints_right())

# Extract the actual 2D coordinate data
keypoints = keypoints['positions_2d'].item()

# 4. Data alignment and sanity checks
for subject in dataset.subjects():
    # Make sure the 2D dataset contains this subject
    assert subject in keypoints, 'Subject {} is missing from the 2D detections dataset'.format(subject)
    
    for action in dataset[subject].keys():
        # Make sure the 2D dataset contains this action
        assert action in keypoints[subject], 'Action {} of subject {} is missing from the 2D detections dataset'.format(action, subject)
        
        # Skip actions without 3D data
        if 'positions_3d' not in dataset[subject][action]:
            continue

        # Iterate over every camera view
        for cam_idx in range(len(keypoints[subject][action])):

            # Number of frames in the 3D (mocap) data
            mocap_length = dataset[subject][action]['positions_3d'][cam_idx].shape[0]
            
            # Key step: check that the frame counts match
            # We check >= instead of == because some Human3.6M videos have more frames than the mocap data
            assert keypoints[subject][action][cam_idx].shape[0] >= mocap_length

            # If the 2D detections have more frames than the 3D ground truth
            if keypoints[subject][action][cam_idx].shape[0] > mocap_length:
                # Truncate the 2D data so its length matches the 3D data exactly
                keypoints[subject][action][cam_idx] = keypoints[subject][action][cam_idx][:mocap_length]

        # Double-check that the number of 2D views equals the number of 3D views
        assert len(keypoints[subject][action]) == len(dataset[subject][action]['positions_3d'])

# 5. Normalize the 2D coordinates
for subject in keypoints.keys():
    for action in keypoints[subject]:
        for cam_idx, kps in enumerate(keypoints[subject][action]):
            # Camera parameters for this view (resolution width w and height h)
            cam = dataset.cameras()[subject][cam_idx]
            
            # Key step: normalize the screen coordinates
            # Pixel coordinates (e.g. x=1000, y=500) are mapped to normalized coordinates (roughly [-1, 1])
            # so the model becomes insensitive to image resolution
            kps[..., :2] = normalize_screen_coordinates(kps[..., :2], w=cam['res_w'], h=cam['res_h'])
            
            # Store the normalized data back
            keypoints[subject][action][cam_idx] = kps
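
For reference, normalize_screen_coordinates in the VideoPose3D-style common/camera.py that FinePOSE builds on is essentially the following (a sketch of the expected behavior, not necessarily a character-for-character copy):

def normalize_screen_coordinates(X, w, h):
    assert X.shape[-1] == 2
    # Map x from [0, w] to [-1, 1] and scale y by the same factor, so the aspect ratio is preserved
    return X / w * 2 - [1, h / w]

For a 1000×1000 frame, a pixel at (1000, 500) therefore maps to (1.0, 0.0).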

II. The Core Data Extraction Function (fetch)

The fetch function is defined right after the global data loading above. Given a list of subjects (e.g. S1, S5) and an action_filter, it pulls the required sequences out of the big dictionaries loaded above and packs them into lists.

def fetch(subjects, action_filter=None, subset=1, parse_3d_poses=True):
    # Output lists
    out_poses_3d = []      # 3D pose ground truth
    out_poses_2d = []      # 2D keypoint inputs
    out_camera_params = [] # camera intrinsics
    out_action = []        # action name labels

    # Outer loop: iterate over the requested subjects
    for subject in subjects:
        # Inner loop: iterate over all actions of that subject
        for action in keypoints[subject].keys():
            
            # Action filtering
            if action_filter is not None:
                found = False
                for a in action_filter:
                    # Match if the action name starts with a filter string (e.g. 'Walking 1' startswith 'Walking')
                    if action.startswith(a):
                        found = True
                        break
                if not found:
                    continue # skip actions that do not match

            # Collect the 2D data
            poses_2d = keypoints[subject][action]
            # Iterate over all camera views of this action (H3.6M usually has 4)
            for i in range(len(poses_2d)): 
                out_poses_2d.append(poses_2d[i]) # append the 2D data
                out_action.append(action)        # append the corresponding action label

            # Collect the camera parameters
            if subject in dataset.cameras():
                cams = dataset.cameras()[subject]
                assert len(cams) == len(poses_2d), 'Camera count mismatch'
                for cam in cams:
                    if 'intrinsic' in cam:
                        out_camera_params.append(cam['intrinsic']) # append the camera intrinsics

            # --- Collect the 3D data (if requested) ---
            if parse_3d_poses and 'positions_3d' in dataset[subject][action]:
                poses_3d = dataset[subject][action]['positions_3d']
                assert len(poses_3d) == len(poses_2d), 'Camera count mismatch'
                for i in range(len(poses_3d)): # iterate over all camera views
                    out_poses_3d.append(poses_3d[i])

    # If nothing was collected, set the output to None
    if len(out_camera_params) == 0:
        out_camera_params = None
    if len(out_poses_3d) == 0:
        out_poses_3d = None

    # --- Downsampling and subset selection ---
    stride = args.downsample # downsampling stride (keep one frame every `stride` frames)
    
    # Case 1: only a subset of the data is requested (subset < 1)
    if subset < 1:
        for i in range(len(out_poses_2d)):
            # Number of frames to keep
            n_frames = int(round(len(out_poses_2d[i])//stride * subset)*stride)
            # Pick a random starting point (deterministic random, so runs are reproducible)
            start = deterministic_random(0, len(out_poses_2d[i]) - n_frames + 1, str(len(out_poses_2d[i])))
            
            # Slice the 2D data: [start : start+length : stride]
            out_poses_2d[i] = out_poses_2d[i][start:start+n_frames:stride]
            if out_poses_3d is not None:
                # Slice the 3D data in exactly the same way
                out_poses_3d[i] = out_poses_3d[i][start:start+n_frames:stride]
    
    # Case 2: the full data is requested but needs downsampling (stride > 1)
    elif stride > 1:
        # Slice with the stride: [::stride]
        for i in range(len(out_poses_2d)):
            out_poses_2d[i] = out_poses_2d[i][::stride]
            if out_poses_3d is not None:
                out_poses_3d[i] = out_poses_3d[i][::stride]

    # Return the assembled lists
    return out_camera_params, out_poses_3d, out_poses_2d, out_action
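
A quick sanity check of the subset/stride arithmetic (a toy example, independent of main.py): with a 1000-frame sequence, stride = 2 and subset = 0.5, the formula keeps n_frames = 500 source frames and the strided slice then returns 250 of them:

import numpy as np

seq = np.zeros((1000, 17, 2))       # a fake 1000-frame 2D sequence
stride, subset = 2, 0.5

n_frames = int(round(len(seq) // stride * subset) * stride)   # 500
start = 0                           # deterministic_random would pick this value reproducibly
kept = seq[start:start + n_frames:stride]

print(n_frames, kept.shape[0])      # 500 250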

III. Model Construction and the Static Prompts

1. Model instantiation

Three model objects are instantiated here, each serving a different purpose.

# --- Initialize the models ---
# 1. The training model (is_train=True)
model_pos_train = FinePOSE(args, joints_left, joints_right, is_train=True)

# 2. A temporary test model used for validation during training (is_train=False)
# Same architecture as the training model, but configured for evaluation
model_pos_test_temp = FinePOSE(args, joints_left, joints_right, is_train=False)

# 3. The final evaluation/sampling model (is_train=False)
# It additionally receives num_proposals and sampling_timesteps,
# i.e. the number of candidate hypotheses and diffusion sampling steps to generate
model_pos = FinePOSE(args, joints_left, joints_right, is_train=False, num_proposals=args.num_proposals, sampling_timesteps=args.sampling_timesteps)

2. Static text prompts

Six fixed descriptive prompts are defined and pre-encoded.

# The predefined prompt list
# These words describe the body structure and motion attributes the model should attend to
pre_text_information = [
    "A person",  # overall concept: this is a "person"
    "speed",     # physical attribute: how fast the motion is
    "head",      # body part: head
    "body",      # body part: torso
    "arm",       # body part: arm
    "leg",       # body part: leg
]

# Empty list that will hold the encoded tensors
pre_text_tensor = []

# Iterate over each prompt word
for i in pre_text_information:
    # 1. Encoding
    # Call the previously defined encode_text function (based on CLIP)
    # to turn a natural-language string (e.g. "arm") into a token tensor
    # Input: a string
    # Output: a tensor, typically of shape [1, 77] (CLIP's token sequence length)
    tmp_text = encode_text(i)
    
    # 2. Collect
    # Append the encoded tensor to the list
    pre_text_tensor.append(tmp_text)

# 3. Concatenation
# Concatenate all tensors in the list along dim 0
# If each tmp_text is [1, 77],
# the concatenated pre_text_tensor has shape [6, 77],
# where 6 is the length of pre_text_information
pre_text_tensor = torch.cat(pre_text_tensor, dim=0)
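
The post assumes encode_text is a thin wrapper around CLIP's tokenizer. A minimal sketch of what such a helper could look like (the actual helper in the FinePOSE repository may differ, e.g. it might also run the CLIP text encoder):

import clip  # the official OpenAI CLIP package

def encode_text(text):
    # clip.tokenize pads/truncates to CLIP's fixed context length of 77 tokens
    # and returns a LongTensor of shape [1, 77] for a single string
    return clip.tokenize(text)

tokens = encode_text("A person")
print(tokens.shape)  # torch.Size([1, 77])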

IV. Helper Utility Function

This is a standalone function used only at test time to slice data: the Transformer requires fixed-length inputs (e.g. 243 frames), while test videos vary in length.

def eval_data_prepare(receptive_field, inputs_2d, inputs_3d):
    """
    Slice a video sequence of arbitrary length into the fixed-length segments the model needs.
    (Test videos do not have a fixed length.)
    """
    
    # 1. Shape check
    # The 2D and 3D data must agree in frame count (dim 0) and joint count (dim 1);
    # [:-1] ignores the last dimension (the coordinate dimension: 2 for 2D, 3 for 3D)
    assert inputs_2d.shape[:-1] == inputs_3d.shape[:-1], "2d and 3d inputs shape must be same! "+str(inputs_2d.shape)+str(inputs_3d.shape)
    
    # 2. Drop the batch dimension
    # The input is usually [1, num_frames, 17, 2]; after squeeze it becomes [num_frames, 17, 2],
    # which is easier to slice along the frame axis
    inputs_2d_p = torch.squeeze(inputs_2d)
    inputs_3d_p = torch.squeeze(inputs_3d)

    # 3. Compute the number of segments (out_num)
    # Logic: total frames / receptive field; if there is a remainder, add one more segment.
    # Example: 1000 frames with a receptive field of 243 -> 1000 // 243 = 4 remainder 28 -> 4 + 1 = 5 segments.
    if inputs_2d_p.shape[0] / receptive_field > inputs_2d_p.shape[0] // receptive_field: 
        out_num = inputs_2d_p.shape[0] // receptive_field + 1
    elif inputs_2d_p.shape[0] / receptive_field == inputs_2d_p.shape[0] // receptive_field:
        out_num = inputs_2d_p.shape[0] // receptive_field

    # 4. Allocate empty containers
    # Output tensors of shape [num_segments, receptive_field, num_joints, num_coords]
    eval_input_2d = torch.empty(out_num, receptive_field, inputs_2d_p.shape[1], inputs_2d_p.shape[2])
    eval_input_3d = torch.empty(out_num, receptive_field, inputs_3d_p.shape[1], inputs_3d_p.shape[2])

    # 5. Regular slicing (all segments except the last one)
    # e.g. the first 4 segments are cut sequentially: 0-243, 243-486, ...
    for i in range(out_num - 1):
        eval_input_2d[i,:,:,:] = inputs_2d_p[i*receptive_field : i*receptive_field+receptive_field, :, :]
        eval_input_3d[i,:,:,:] = inputs_3d_p[i*receptive_field : i*receptive_field+receptive_field, :, :]

    # 6. Special case: the whole video is shorter than the receptive field
    # e.g. the video has only 100 frames but the model requires 243
    if inputs_2d_p.shape[0] < receptive_field:
        from torch.nn import functional as F
        # How many frames need to be padded
        pad_right = receptive_field - inputs_2d_p.shape[0]
        
        # Move the axes: [frames, joints, coords] -> [joints, coords, frames]
        # because F.pad pads the last dimension by default
        inputs_2d_p = rearrange(inputs_2d_p, 'b f c -> f c b')
        # mode='replicate' repeats the last frame to fill the gap
        inputs_2d_p = F.pad(inputs_2d_p, (0, pad_right), mode='replicate')
        # Move the axes back
        inputs_2d_p = rearrange(inputs_2d_p, 'f c b -> b f c')
    
    # The same padding for the 3D data
    if inputs_3d_p.shape[0] < receptive_field:
        pad_right = receptive_field - inputs_3d_p.shape[0]
        inputs_3d_p = rearrange(inputs_3d_p, 'b f c -> f c b')
        inputs_3d_p = F.pad(inputs_3d_p, (0, pad_right), mode='replicate')
        inputs_3d_p = rearrange(inputs_3d_p, 'f c b -> b f c')

    # 7. The last segment
    # A neat trick: instead of zero-padding, take the last `receptive_field` frames counted backwards
    # from the end of the video. For a 1000-frame video the last segment is [1000-243 : 1000], i.e. [757 : 1000].
    # It overlaps with the previous segment, but every frame contains real data rather than padding.
    eval_input_2d[-1,:,:,:] = inputs_2d_p[-receptive_field:, :, :]
    eval_input_3d[-1,:,:,:] = inputs_3d_p[-receptive_field:, :, :]

    return eval_input_2d, eval_input_3d
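
A quick usage sketch (assuming 17 joints and a 243-frame receptive field): a 1000-frame clip becomes 5 chunks, with the last chunk taken from the tail of the sequence and overlapping the fourth one:

import torch

receptive_field = 243
inputs_2d = torch.randn(1, 1000, 17, 2)   # a fake variable-length 2D sequence
inputs_3d = torch.randn(1, 1000, 17, 3)

chunks_2d, chunks_3d = eval_data_prepare(receptive_field, inputs_2d, inputs_3d)
print(chunks_2d.shape)  # torch.Size([5, 243, 17, 2])
print(chunks_3d.shape)  # torch.Size([5, 243, 17, 3])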

V. The Main Branch: Training

# If we are not in evaluation mode (args.evaluate is empty), run the training pipeline
if not args.evaluate:
    # 1. Load the training data
    # Call fetch to load the training subjects (S1, S5, S6, S7, S8)
    cameras_train, poses_train, poses_train_2d, action_train = fetch(subjects_train, action_filter, subset=args.subset)
    
    # Flag: whether the special best_epoch_20_10 checkpoint has already been saved
    flag_best_20_10 = False

    # 2. Set up the optimizer
    lr = args.learning_rate
    # AdamW handles weight decay better than Adam, which helps against overfitting
    optimizer = optim.AdamW(model_pos_train.parameters(), lr=lr, weight_decay=0.1)

    # Learning-rate decay factor
    lr_decay = args.lr_decay
    
    # Lists that record the various losses
    losses_3d_train = []      # overall training loss
    losses_3d_pos_train = []  # training 3D position loss (MPJPE)
    losses_3d_diff_train = [] # diffusion loss (not used in this code)
    losses_3d_train_eval = [] # training-set evaluation loss (not used in this code)
    losses_3d_valid = []      # validation loss
    losses_3d_depth_valid = []# validation depth loss (not used in this code)

    epoch = 0
    best_epoch = 0
    initial_momentum = 0.1
    final_momentum = 0.001
    
    # 3. Initialize the data generators
    # train_generator: the generator used for training.
    #   - ChunkedGenerator_Seq: cuts long videos into chunks for training
    #   - augment=True: enables data augmentation (e.g. horizontal flipping) for more data diversity
    #   - shuffle=True: shuffles the data order for more stable training
    train_generator = ChunkedGenerator_Seq(args.batch_size//args.stride, cameras_train, poses_train, poses_train_2d, action_train, args.number_of_frames,
                                       pad=pad, causal_shift=causal_shift, shuffle=True, augment=args.data_augmentation,
                                       kps_left=kps_left, kps_right=kps_right, joints_left=joints_left, joints_right=joints_right)
    
    # train_generator_eval: only used to report the total frame count; it is not used for actual training
    train_generator_eval = UnchunkedGenerator_Seq(cameras_train, poses_train, poses_train_2d, action_train,
                                              pad=pad, causal_shift=causal_shift, augment=False)
    print('INFO: Training on {} frames'.format(train_generator_eval.num_frames()))
    
    if not args.nolog:
        writer.add_text(args.log+'_'+TIMESTAMP + '/Training Frames', str(train_generator_eval.num_frames()))

    # 4. Resuming from a checkpoint
    if args.resume:
        epoch = checkpoint['epoch'] # restore the epoch counter
        if 'optimizer' in checkpoint and checkpoint['optimizer'] is not None:
            optimizer.load_state_dict(checkpoint['optimizer']) # restore the optimizer state
            train_generator.set_random_state(checkpoint['random_state']) # restore the generator's random state (so the data order continues seamlessly)
        else:
            print('WARNING: this checkpoint does not contain an optimizer state. The optimizer will be reinitialized.')
        if not args.coverlr:
            lr = checkpoint['lr'] # restore the learning rate
        
        # Hard-coded learning-rate override (for debugging; normally this line should be commented out)
        lr = 0.000008 

    print('** Note: reported losses are averaged over all frames.')
    print('** The final evaluation will be carried out after the last training epoch.')

    # Main training loop (epoch loop)
    while epoch < args.epochs:
        start_time = time()
        # Reset the accumulated losses for this epoch
        epoch_loss_3d_train = 0
        epoch_loss_3d_pos_train = 0
        epoch_loss_3d_diff_train = 0
        epoch_loss_traj_train = 0
        epoch_loss_2d_train_unlabeled = 0
        N = 0 # total sample counter
        N_semi = 0
        # Switch to training mode (enables dropout and BN updates)
        model_pos_train.train() 
        iteration = 0

        num_batches = train_generator.batch_num()

        # Quick-debug switch
        quickdebug = args.debug
        
        # 5. Batch loop (iteration loop)
        # Fetch one batch from the generator:
        # batch_3d: 3D ground truth, batch_2d: 2D inputs, batch_act: action labels
        for cameras_train, batch_3d, batch_2d, batch_act in train_generator.next_epoch():
            
            # --- A. Text feature processing ---
            # Clean the action labels (strip the trailing index, e.g. "Walking 1" -> "Walking")
            # c is the number of labels in the batch; it is defined earlier in main.py (not shown in this excerpt)
            for i in range(c):
                batch_act[i][0] = batch_act[i][0].split(" ")[0]
            
            input_text = [] # empty list for the encoded action labels

            # Iterate over the cleaned action labels
            for i in batch_act:
                # i[0] is now a clean string such as "Walking" or "Eating"
                
                # encode_text (which internally calls clip.tokenize)
                # turns the string into a sequence of CLIP token IDs
                # tmp_text typically has shape [1, 77] (one sentence, fixed length of 77 tokens)
                tmp_text = encode_text(i[0])
                
                input_text.append(tmp_text)

            # Concatenate the tensors in the list
            # With a batch size of 4, input_text becomes [4, 77]
            input_text = torch.cat(input_text, dim=0)

            # Expand the pre-encoded static prompts ("A person", "speed", "head", "body", "arm", "leg")
            # pre_text_tensor was encoded once at startup and has shape [6, 77]

            # .unsqueeze(dim=0): add a batch dimension -> [1, 6, 77]
            pre_text_tensor_train = pre_text_tensor.unsqueeze(dim=0)

            # .repeat(input_text.shape[0], 1, 1): replicate along the batch dimension
            # input_text.shape[0] is the current batch size (e.g. 4) -> [4, 6, 77]
            pre_text_tensor_train = pre_text_tensor_train.repeat(input_text.shape[0], 1, 1)
            if iteration % 1000 == 0:
                print("%d/%d"% (iteration, num_batches))

            # --- B. Convert to tensors and move to the GPU ---
            if cameras_train is not None:
                cameras_train = torch.from_numpy(cameras_train.astype('float32'))
            inputs_3d = torch.from_numpy(batch_3d.astype('float32'))
            inputs_2d = torch.from_numpy(batch_2d.astype('float32'))

            if torch.cuda.is_available():
                inputs_3d = inputs_3d.cuda()
                inputs_2d = inputs_2d.cuda()
                input_text = input_text.cuda()
                pre_text_tensor_train = pre_text_tensor_train.cuda()
                if cameras_train is not None:
                    cameras_train = cameras_train.cuda()
            
            # --- C. Trajectory handling ---
            # Back up the root-joint trajectory; the prediction itself does not include the trajectory
            inputs_traj = inputs_3d[:, :, :1].clone()
            # Zero out the root joint of the 3D input (root-relative representation)
            inputs_3d[:, :, 0] = 0

            optimizer.zero_grad() # clear the gradients

            # --- D. Forward pass ---
            # Predict the 3D poses
            # Inputs: 2D coordinates, 3D ground truth (the denoising target of the diffusion model), action text, static prompts
            predicted_3d_pos = model_pos_train(inputs_2d, inputs_3d, input_text, pre_text_tensor_train)

            # --- E. Loss computation ---
            # MPJPE (Mean Per Joint Position Error)
            loss_3d_pos = mpjpe(predicted_3d_pos, inputs_3d)
            loss_total = loss_3d_pos
            
            # --- F. Backward pass ---
            # backward() is given an explicit gradient argument, which also covers the case where loss_total is not a scalar
            loss_total.backward(loss_total.clone().detach())

            # Accumulate the loss for logging
            loss_total = torch.mean(loss_total)
            epoch_loss_3d_train += inputs_3d.shape[0] * inputs_3d.shape[1] * loss_total.item()
            epoch_loss_3d_pos_train += inputs_3d.shape[0] * inputs_3d.shape[1] * loss_3d_pos.item()
            N += inputs_3d.shape[0] * inputs_3d.shape[1]

            # --- G. Parameter update ---
            optimizer.step()

            iteration += 1

            if quickdebug: # in debug mode, stop after a single batch
                if N==inputs_3d.shape[0] * inputs_3d.shape[1]:
                    break

        # Record the average losses of this epoch
        losses_3d_train.append(epoch_loss_3d_train / N)
        losses_3d_pos_train.append(epoch_loss_3d_pos_train / N)

        # Validation at the end of each epoch
        with torch.no_grad(): # no gradients needed for validation, which saves memory
            # Copy the trained weights into the test model (a shadow model)
            model_pos_test_temp.load_state_dict(model_pos_train.state_dict(), strict=False)
            model_pos_test_temp.eval() # switch to evaluation mode

            epoch_loss_3d_valid = None
            epoch_loss_3d_depth_valid = 0
            epoch_loss_traj_valid = 0
            epoch_loss_2d_valid = 0
            epoch_loss_3d_vel = 0
            N = 0
            iteration = 0
            
            if not args.no_eval:
                # Iterate over the test-set generator
                for cam, batch, batch_2d, batch_act in test_generator.next_epoch():
                    # ... (preprocessing and text encoding, same logic as for training) ...
                    for i in range(batch_act.shape[0]):
                        batch_act[i] = batch_act[i].split(" ")[0]
                    input_text = []
                    for i in batch_act:
                        tmp_text = encode_text(i)
                        input_text.append(tmp_text)
                    input_text = torch.cat(input_text, dim=0)

                    inputs_3d = torch.from_numpy(batch.astype('float32'))
                    inputs_2d = torch.from_numpy(batch_2d.astype('float32'))

                    # --- H. Test-time augmentation (TTA) ---
                    # Build a horizontally flipped copy of the 2D input
                    # and swap the left/right keypoint indices (kps_left <-> kps_right)
                    inputs_2d_flip = inputs_2d.clone()
                    inputs_2d_flip[:, :, :, 0] *= -1 # negate the x coordinates
                    inputs_2d_flip[:, :, kps_left + kps_right, :] = inputs_2d_flip[:, :, kps_right + kps_left, :]

                    # --- I. Evaluation chunking ---
                    # Use eval_data_prepare (explained above) to cut the variable-length video into fixed-length segments
                    inputs_3d_p = inputs_3d
                    inputs_2d, inputs_3d = eval_data_prepare(receptive_field, inputs_2d, inputs_3d_p)
                    inputs_2d_flip, _ = eval_data_prepare(receptive_field, inputs_2d_flip, inputs_3d_p)
                    
                    # Expand the text features to match the number of segments
                    input_text = input_text.repeat(int(inputs_2d.shape[0]/input_text.shape[0]), 1)
                    pre_text_tensor_valid = pre_text_tensor.unsqueeze(dim=0)
                    pre_text_tensor_valid = pre_text_tensor_valid.repeat(input_text.shape[0], 1, 1)

                    if torch.cuda.is_available():
                        inputs_3d = inputs_3d.cuda()
                        inputs_2d = inputs_2d.cuda()
                        inputs_2d_flip = inputs_2d_flip.cuda()
                        input_text = input_text.cuda()
                        pre_text_tensor_valid = pre_text_tensor_valid.cuda()
                    inputs_3d[:, :, 0] = 0 # remove the root-joint translation

                    # --- J. Validation forward pass ---
                    # Both the original and the flipped 2D inputs are passed in; the model handles the TTA internally
                    predicted_3d_pos = model_pos_test_temp(inputs_2d, inputs_3d, input_text, pre_text_tensor_valid, input_2d_flip=inputs_2d_flip)
                    predicted_3d_pos[:, :, :, :, 0] = 0 # make sure the predicted root joint is zero as well

                    # Validation error (MPJPE over the diffusion outputs)
                    error = mpjpe_diffusion(predicted_3d_pos, inputs_3d)

                    if iteration == 0:
                        epoch_loss_3d_valid = inputs_3d.shape[0] * inputs_3d.shape[1] * error.clone()
                    else:
                        epoch_loss_3d_valid += inputs_3d.shape[0] * inputs_3d.shape[1] * error.clone()
                    N += inputs_3d.shape[0] * inputs_3d.shape[1]
                    iteration += 1
                    if quickdebug:
                        if N == inputs_3d.shape[0] * inputs_3d.shape[1]:
                            break

                # Record the average validation error
                losses_3d_valid.append(epoch_loss_3d_valid / N)

        # Elapsed time for this epoch
        elapsed = (time() - start_time) / 60

        # Print and log the epoch summary
        # ... (the current epoch's training loss and validation loss) ...
        if args.no_eval:
            print('[%d] time %.2f lr %f 3d_train %f 3d_pos_train %f 3d_diff_train %f' % (
                epoch + 1,
                elapsed,
                lr,
                losses_3d_train[-1] * 1000,
                losses_3d_pos_train[-1] * 1000,
                losses_3d_diff_train[-1] * 1000
            ))

            log_path = os.path.join(args.checkpoint, 'training_log.txt')
            f = open(log_path, mode='a')
            f.write('[%d] time %.2f lr %f 3d_train %f 3d_pos_train %f 3d_diff_train %f\n' % (
                epoch + 1,
                elapsed,
                lr,
                losses_3d_train[-1] * 1000,
                losses_3d_pos_train[-1] * 1000,
                losses_3d_diff_train[-1] * 1000
            ))
            f.close()

        else:
            print('[%d] time %.2f lr %f 3d_train %f 3d_pos_train %f 3d_pos_valid %f' % (
                epoch + 1,
                elapsed,
                lr,
                losses_3d_train[-1] * 1000,
                losses_3d_pos_train[-1] * 1000,
                losses_3d_valid[-1][0] * 1000
            ))

            log_path = os.path.join(args.checkpoint, 'training_log.txt')
            f = open(log_path, mode='a')
            f.write('[%d] time %.2f lr %f 3d_train %f 3d_pos_train %f 3d_pos_valid %f\n' % (
                epoch + 1,
                elapsed,
                lr,
                losses_3d_train[-1] * 1000,
                losses_3d_pos_train[-1] * 1000,
                losses_3d_valid[-1][0] * 1000
            ))
            f.close()

            if not args.nolog:
                writer.add_scalar("Loss/3d validation loss", losses_3d_valid[-1] * 1000, epoch+1)
        if not args.nolog:
            writer.add_scalar("Loss/3d training loss", losses_3d_train[-1] * 1000, epoch+1)
            writer.add_scalar("Parameters/learing rate", lr, epoch+1)
            writer.add_scalar('Parameters/training time per epoch', elapsed, epoch+1)

        # Decay the learning rate exponentially
        lr *= lr_decay
        for param_group in optimizer.param_groups:
            param_group['lr'] *= lr_decay
        epoch += 1


        # Checkpoint saving
        # 1. Periodic saving (every args.checkpoint_frequency epochs, 20 in the author's setup, and only after epoch 60)
        if epoch % args.checkpoint_frequency == 0 and epoch > 60:
            chk_path = os.path.join(args.checkpoint, 'epoch_{}.bin'.format(epoch))
            print('Saving checkpoint to', chk_path)

            torch.save({
                'epoch': epoch,
                'lr': lr,
                'random_state': train_generator.random_state(),
                'optimizer': optimizer.state_dict(),
                'model_pos': model_pos_train.state_dict(),
            }, chk_path)

        #### save best checkpoint
        best_chk_path = os.path.join(args.checkpoint, 'best_epoch_1_1.bin')
        best_chk_path_epoch = os.path.join(args.checkpoint, 'best_epoch_20_10.bin')

        # 2. Save the best model so far (best epoch)
        if losses_3d_valid[-1][0] * 1000 < min_loss:
            min_loss = losses_3d_valid[-1][0] * 1000
            best_epoch = epoch
            print("save best checkpoint")
            torch.save({
                'epoch': epoch,
                'lr': lr,
                'random_state': train_generator.random_state(),
                'optimizer': optimizer.state_dict(),
                'model_pos': model_pos_train.state_dict(),
            }, best_chk_path)

            # 3. Additionally save best_epoch_20_10.bin the first time the best model falls into a specific range
            #    (epoch >= args.save_emin, i.e. epoch > 70 in the author's setup, and validation error within
            #    [save_lmin, save_lmax]); this checkpoint is used to reproduce the paper's reported numbers
            if epoch >= args.save_emin and args.save_lmin <= losses_3d_valid[-1][0] * 1000 <= args.save_lmax and flag_best_20_10 == False:
                flag_best_20_10 = True
                torch.save({
                    'epoch': epoch,
                    'lr': lr,
                    'random_state': train_generator.random_state(),
                    'optimizer': optimizer.state_dict(),
                    'model_pos': model_pos_train.state_dict(),
                }, best_chk_path_epoch)

            f = open(log_path, mode='a')
            f.write('best epoch\n')
            f.close()

        
        if args.export_training_curves and epoch > 3:
            if 'matplotlib' not in sys.modules:
                import matplotlib
                matplotlib.use('Agg')
                import matplotlib.pyplot as plt

            plt.figure()
            epoch_x = np.arange(3, len(losses_3d_train)) + 1
            plt.plot(epoch_x, losses_3d_train[3:], '--', color='C0')
            plt.plot(epoch_x, losses_3d_train_eval[3:], color='C0')
            plt.plot(epoch_x, losses_3d_valid[3:], color='C1')
            plt.legend(['3d train', '3d train (eval)', '3d valid (eval)'])
            plt.ylabel('MPJPE (m)')
            plt.xlabel('Epoch')
            plt.xlim((3, epoch))
            plt.savefig(os.path.join(args.checkpoint, 'loss_3d.png'))

            plt.close('all')
# Training end
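
Since lr *= lr_decay runs once per epoch, the schedule is a plain exponential decay, lr_t = lr_0 · lr_decay^t. A tiny standalone check (the numbers below are only illustrative, not FinePOSE's actual defaults):

lr0, lr_decay = 4e-5, 0.993   # illustrative values
lrs = [lr0 * lr_decay ** t for t in (0, 50, 100, 200)]
print(["%.2e" % v for v in lrs])   # ['4.00e-05', '2.82e-05', '1.98e-05', '9.82e-06']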

VI. The Other Branches: Visualization and Evaluation

The end of the script handles two mutually exclusive branches: --render (visualization) and the else branch (evaluation).

1. Visualization (if args.render:)

This branch is used to generate demo videos.

# Branch 1: rendering mode (visualization)
# If --render is passed on the command line, enter the visualization pipeline and generate a video
if args.render:
    print('Rendering...')

    # 2D keypoint input for the requested subject, action, and camera view
    input_keypoints = keypoints[args.viz_subject][args.viz_action][args.viz_camera].copy()
    
    # Try to obtain the 3D ground truth for comparison
    ground_truth = None
    if args.viz_subject in dataset.subjects() and args.viz_action in dataset[args.viz_subject]:
        if 'positions_3d' in dataset[args.viz_subject][args.viz_action]:
            ground_truth = dataset[args.viz_subject][args.viz_action]['positions_3d'][args.viz_camera].copy()
    
    # If this action has no ground truth (e.g. unlabeled data), tell the user
    if ground_truth is None:
        print('INFO: this action is unlabeled. Ground truth will not be rendered.')

    # Build a generator that contains just this one sequence
    gen = UnchunkedGenerator_Seq(None, [ground_truth], [input_keypoints],
                             pad=pad, causal_shift=causal_shift, augment=args.test_time_augmentation,
                             kps_left=kps_left, kps_right=kps_right, joints_left=joints_left, joints_right=joints_right)
    
    # Call evaluate to get the predictions (return_predictions=True returns coordinates instead of errors)
    prediction = evaluate(gen, return_predictions=True)

    # Comparison mode (compare FinePOSE against PoseFormer)
    if args.compare:
        from common.model_poseformer import PoseTransformer
        # Initialize the PoseFormer model
        model_pf = PoseTransformer(num_frame=81, num_joints=17, in_chans=2, num_heads=8, mlp_ratio=2., qkv_bias=False, qk_scale=None,drop_path_rate=0.1)
        if torch.cuda.is_available():
            model_pf = nn.DataParallel(model_pf)
            model_pf = model_pf.cuda()
        # Get PoseFormer's predictions
        prediction_pf = evaluate(gen, newmodel=model_pf, return_predictions=True)


    ### Align the prediction with the shape of the ground truth
    # The model predicts in fixed-length segments, so the segments have to be stitched back to the original video length
    if ground_truth.shape[0] / receptive_field > ground_truth.shape[0] // receptive_field: 
        batch_num = (ground_truth.shape[0] // receptive_field) +1
        prediction2 = np.empty_like(ground_truth)
        for i in range(batch_num-1):
            prediction2[i*receptive_field:(i+1)*receptive_field,:,:] = prediction[i,:,:,:]
        left_frames = ground_truth.shape[0] - (batch_num-1)*receptive_field
        prediction2[-left_frames:,:,:] = prediction[-1,-left_frames:,:,:]
        prediction = prediction2
    elif ground_truth.shape[0] / receptive_field == ground_truth.shape[0] // receptive_field:
        # Note: reshape returns a new array, so the result has to be assigned back
        # (the original code discards the return value here, which looks like a bug)
        prediction = prediction.reshape(ground_truth.shape[0], 17, 3)

    # Export the predicted 3D joint positions to an .npy file (if viz_export is set)
    if args.viz_export is not None:
        print('Exporting joint positions to', args.viz_export)
        # Predictions are in camera space (note: the saved data is in camera coordinates)
        np.save(args.viz_export, prediction)

    # Generate the visualization video (if viz_output is set)
    if args.viz_output is not None:
        if ground_truth is not None:
            # Reapply the trajectory (restore the absolute root motion)
            # The prediction is root-relative, so the root translation is added back here
            trajectory = ground_truth[:, :1]
            ground_truth[:, 1:] += trajectory
            prediction += trajectory
            if args.compare:
                prediction_pf += trajectory

        # Invert the camera transform, i.e. go back to world coordinates for rendering
        cam = dataset.cameras()[args.viz_subject][args.viz_camera]
        if ground_truth is not None:
            if args.compare:
                prediction_pf = camera_to_world(prediction_pf, R=cam['orientation'], t=cam['translation'])
            prediction = camera_to_world(prediction, R=cam['orientation'], t=cam['translation'])
            ground_truth = camera_to_world(ground_truth, R=cam['orientation'], t=cam['translation'])
        else:
            # Without ground truth, borrow the rotation from any available camera, purely for visualization

            for subject in dataset.cameras():
                if 'orientation' in dataset.cameras()[subject][args.viz_camera]:
                    rot = dataset.cameras()[subject][args.viz_camera]['orientation']
                    break
            if args.compare:
                prediction_pf = camera_to_world(prediction_pf, R=rot, t=0)
                prediction_pf[:, :, 2] -= np.min(prediction_pf[:, :, 2])
            prediction = camera_to_world(prediction, R=rot, t=0)
            
            prediction[:, :, 2] -= np.min(prediction[:, :, 2])
        
        # Assemble the dictionary of sequences to render
        if args.compare:
            anim_output = {'PoseFormer': prediction_pf}
            anim_output['Ours'] = prediction
        else:
            # Without comparison, only one result is rendered ('Reconstruction')
            # Note: as written, this renders the ground truth plus Gaussian noise rather than the model's
            # prediction, which looks like leftover debugging code; to visualize the actual output you
            # would pass `prediction` here instead
            anim_output = {'Reconstruction': ground_truth + np.random.normal(loc=0.0, scale=0.1, size=[ground_truth.shape[0], 17, 3])}
        
        if ground_truth is not None and not args.viz_no_ground_truth:
            anim_output['Ground truth'] = ground_truth

        # Convert the 2D keypoints back to image pixel coordinates
        input_keypoints = image_coordinates(input_keypoints[..., :2], w=cam['res_w'], h=cam['res_h'])

        from common.visualization import render_animation
        # Call the renderer to produce an MP4 video
        render_animation(input_keypoints, keypoints_metadata, anim_output,
                        dataset.skeleton(), dataset.fps(), args.viz_bitrate, cam['azimuth'], args.viz_output,
                        limit=args.viz_limit, downsample=args.viz_downsample, size=args.viz_size,
                        input_video_path=args.viz_video, viewport=(cam['res_w'], cam['res_h']),
                        input_video_skip=args.viz_skip)
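
image_coordinates is simply the inverse of normalize_screen_coordinates shown in Section I. In the VideoPose3D-style common/camera.py it is essentially the following (again a sketch, not necessarily a verbatim copy of FinePOSE's file):

def image_coordinates(X, w, h):
    assert X.shape[-1] == 2
    # Undo the earlier normalization: map x from [-1, 1] back to [0, w]
    return (X + [1, h / w]) * w / 2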

2. Full Evaluation (else:)

If neither rendering nor training is requested, this branch runs and scores the model on the entire test set.

It defines two local functions:

  • fetch_actions: similar to the earlier fetch, but it operates on an explicit list of (subject, action) pairs.

  • run_evaluation: the core evaluation loop.

# Branch 2: standard evaluation mode
# If rendering is not enabled, compute the MPJPE error metrics
else:
    print('Evaluating...')
    # Data structures for re-grouping the test set by action
    all_actions = {}
    all_actions_flatten = []
    all_actions_by_subject = {}
    
    # Iterate over the test subjects (S9, S11)
    for subject in subjects_test:
        if subject not in all_actions_by_subject:
            all_actions_by_subject[subject] = {}

        # Iterate over all actions of this subject
        for action in dataset[subject].keys():
            action_name = action.split(' ')[0] # strip the suffix: "Walking 1" -> "Walking"
            if action_name not in all_actions:
                all_actions[action_name] = []
            if action_name not in all_actions_by_subject[subject]:
                all_actions_by_subject[subject][action_name] = []
            
            # Collect (subject, action) pairs
            all_actions[action_name].append((subject, action))
            all_actions_flatten.append((subject, action))
            all_actions_by_subject[subject][action_name].append((subject, action))

    # Local function: extract the data for a list of (subject, action) pairs
    def fetch_actions(actions):
        out_poses_3d = []
        out_poses_2d = []
        out_camera_params = []
        out_action = []

        for subject, action in actions:
            poses_2d = keypoints[subject][action]
            for i in range(len(poses_2d)):
                out_poses_2d.append(poses_2d[i])
                out_action.append(action)

            poses_3d = dataset[subject][action]['positions_3d']
            assert len(poses_3d) == len(poses_2d), 'Camera count mismatch'
            for i in range(len(poses_3d)): 
                out_poses_3d.append(poses_3d[i])

            if subject in dataset.cameras():
                cams = dataset.cameras()[subject]
                assert len(cams) == len(poses_2d), 'Camera count mismatch'
                for cam in cams:
                    if 'intrinsic' in cam:
                        out_camera_params.append(cam['intrinsic'])

        stride = args.downsample
        if stride > 1:
            # Downsample as requested
            for i in range(len(out_poses_2d)):
                out_poses_2d[i] = out_poses_2d[i][::stride]
                if out_poses_3d is not None:
                    out_poses_3d[i] = out_poses_3d[i][::stride]

        return out_camera_params, out_poses_3d, out_poses_2d, out_action

    # Local function: run the evaluation and aggregate the average errors
    def run_evaluation(actions, action_filter=None):
        errors_p1 = []
        errors_p1_h = []
        errors_p1_mean = []
        errors_p1_select = []

        errors_p2 = []
        errors_p2_h = []
        errors_p2_mean = []
        errors_p2_select = []


        # Iterate over each action category
        for action_key in actions.keys():
            if action_filter is not None:
                found = False
                for a in action_filter:
                    if action_key.startswith(a):
                        found = True
                        break
                if not found:
                    continue

            # Fetch all the data of the current action
            cameras_act, poses_act, poses_2d_act, action_act = fetch_actions(actions[action_key])
            # Build the generator
            gen = UnchunkedGenerator_Seq(cameras_act, poses_act, poses_2d_act, action_act,
                                     pad=pad, causal_shift=causal_shift, augment=args.test_time_augmentation,
                                     kps_left=kps_left, kps_right=kps_right, joints_left=joints_left,
                                     joints_right=joints_right)

            # Call evaluate to compute the errors
            if args.p2:
                e1, e1_h, e1_mean, e1_select, e2, e2_h, e2_mean, e2_select = evaluate(gen, action_key)
            else:
                e1, e1_h, e1_mean, e1_select = evaluate(gen, action_key)


            # Collect the errors
            errors_p1.append(e1)
            errors_p1_h.append(e1_h)
            errors_p1_mean.append(e1_mean)
            errors_p1_select.append(e1_select)

            if args.p2:
                errors_p2.append(e2)
                errors_p2_h.append(e2_h)
                errors_p2_mean.append(e2_mean)
                errors_p2_select.append(e2_select)


        # Average the errors over all actions (action-wise average)
        errors_p1 = torch.stack(errors_p1)
        errors_p1_actionwise = torch.mean(errors_p1, dim=0)
        errors_p1_h = torch.stack(errors_p1_h)
        errors_p1_actionwise_h = torch.mean(errors_p1_h, dim=0)
        errors_p1_mean = torch.stack(errors_p1_mean)
        errors_p1_actionwise_mean = torch.mean(errors_p1_mean, dim=0)
        errors_p1_select = torch.stack(errors_p1_select)
        errors_p1_actionwise_select = torch.mean(errors_p1_select, dim=0)

        if args.p2:
            errors_p2 = torch.stack(errors_p2)
            errors_p2_actionwise = torch.mean(errors_p2, dim=0)
            errors_p2_h = torch.stack(errors_p2_h)
            errors_p2_actionwise_h = torch.mean(errors_p2_h, dim=0)
            errors_p2_mean = torch.stack(errors_p2_mean)
            errors_p2_actionwise_mean = torch.mean(errors_p2_mean, dim=0)
            errors_p2_select = torch.stack(errors_p2_select)
            errors_p2_actionwise_select = torch.mean(errors_p2_select, dim=0)

        # Print the results and write them to the log file
        log_path = os.path.join(args.checkpoint, 'h36m_test_log_H%d_K%d.txt' %(args.num_proposals, args.sampling_timesteps))
        f = open(log_path, mode='a')
        for ii in range(errors_p1_actionwise.shape[0]):
            print('step %d Protocol #1   (MPJPE) action-wise average J_Best: %f mm' % (ii, errors_p1_actionwise[ii].item()))
            f.write('step %d Protocol #1   (MPJPE) action-wise average J_Best: %f mm\n' % (ii, errors_p1_actionwise[ii].item()))
            print('step %d Protocol #1   (MPJPE) action-wise average P_Best: %f mm' % (ii, errors_p1_actionwise_h[ii].item()))
            f.write('step %d Protocol #1   (MPJPE) action-wise average P_Best: %f mm\n' % (ii, errors_p1_actionwise_h[ii].item()))
            print('step %d Protocol #1   (MPJPE) action-wise average P_Agg: %f mm' % (ii, errors_p1_actionwise_mean[ii].item()))
            f.write('step %d Protocol #1   (MPJPE) action-wise average P_Agg: %f mm\n' % (ii, errors_p1_actionwise_mean[ii].item()))
            print('step %d Protocol #1   (MPJPE) action-wise average J_Agg: %f mm' % (
            ii, errors_p1_actionwise_select[ii].item()))
            f.write('step %d Protocol #1   (MPJPE) action-wise average J_Agg: %f mm\n' % (
            ii, errors_p1_actionwise_select[ii].item()))

            if args.p2:
                print('step %d Protocol #2   (MPJPE) action-wise average J_Best: %f mm' % (ii, errors_p2_actionwise[ii].item()))
                f.write('step %d Protocol #2   (MPJPE) action-wise average J_Best: %f mm\n' % (ii, errors_p2_actionwise[ii].item()))
                print('step %d Protocol #2   (MPJPE) action-wise average P_Best: %f mm' % (
                ii, errors_p2_actionwise_h[ii].item()))
                f.write('step %d Protocol #2   (MPJPE) action-wise average P_Best: %f mm\n' % (
                ii, errors_p2_actionwise_h[ii].item()))
                print('step %d Protocol #2   (MPJPE) action-wise average P_Agg: %f mm' % (
                ii, errors_p2_actionwise_mean[ii].item()))
                f.write('step %d Protocol #2   (MPJPE) action-wise average P_Agg: %f mm\n' % (
                ii, errors_p2_actionwise_mean[ii].item()))
                print('step %d Protocol #2   (MPJPE) action-wise average J_Agg: %f mm' % (
                    ii, errors_p2_actionwise_select[ii].item()))
                f.write('step %d Protocol #2   (MPJPE) action-wise average J_Agg: %f mm\n' % (
                    ii, errors_p2_actionwise_select[ii].item()))
        f.close()


    # Evaluation entry point
    if not args.by_subject:
        run_evaluation(all_actions, action_filter)
    else:
        for subject in all_actions_by_subject.keys():
            print('Evaluating on subject', subject)
            run_evaluation(all_actions_by_subject[subject], action_filter)
            print('')
