浮点数保留小数点位数的做法之一: Math.round(_value * n) / n;

本文介绍了如何通过乘法操作来精确控制浮点数的小数点后位数,避免因直接进行除法运算而导致的精度损失。文中给出了具体的数学公式,并提供了实现代码示例。

摘要生成于 C知道 ,由 DeepSeek-R1 满血版支持, 前往体验 >

例如:

保留小数点后1位,则 n = 10(即 n = 1 / 0.1)

保留小数点后2位,则 n = 100(即 n = 1 / 0.01)

保留小数点后3位,则 n = 1000(即 n = 1 / 0.001)


至于为什么是先乘后除,可以参考如下文章:

http://www.cnblogs.com/JeffreyZhao/archive/2009/11/24/precision-of-float-point-calculation.html


基于乘法不会损失精度(有条件的),所以对于精度有要求的场合,上面的公式可以改为:

 Math.round(_value * 10) * 0.1

 Math.round(_value * 100) * 0.01

 Math.round(_value * 1000) * 0.001

import json
import os
from math import trunc  # kept for backward compatibility; no longer used (see calculate_overlap_ratio)

import jsonlines
from collections import defaultdict
from typing import Dict, List, Any, Tuple
import logging
import time
from dataclasses import dataclass, field

# Logging configuration: mirror all messages to a log file and to the console.
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
    handlers=[
        logging.FileHandler("collision_processing.log"),
        logging.StreamHandler()
    ]
)
logger = logging.getLogger("CollisionProcessor")


@dataclass
class FrameData:
    """One input record together with its pre-processed road points."""
    timestamp: float                        # frame timestamp used for ordering
    road_points: List[Tuple[float, float]]  # (x, y) pairs rounded to 2 decimals
    record: Dict[str, Any]                  # original JSON record (mutated: gets a 'mask' key)
    group_key: str = ""                     # camera tag extracted from image_path
    scene_id: str = ""
    processed: bool = False                 # set once the frame has been labeled


@dataclass
class CameraStats:
    """Label counters for one camera group."""
    cam_tag: str
    total_frames: int = 0
    yes_count: int = 0
    no_count: int = 0


@dataclass
class SceneStats:
    """Per-scene aggregation over its camera groups."""
    scene_id: str
    total_cam_tags: int = 0
    total_frames: int = 0
    yes_count: int = 0
    no_count: int = 0
    cam_tags: Dict[str, CameraStats] = field(default_factory=dict)


@dataclass
class OverallStats:
    """Whole-run totals."""
    total_scenes: int = 0
    total_frames: int = 0
    yes_count: int = 0
    no_count: int = 0


def extract_group_key(image_path: str) -> str:
    """Return the camera tag: the second-to-last '/'-separated path component.

    Falls back to "default_group" for paths with fewer than two components
    or on any unexpected error.
    """
    try:
        parts = image_path.replace("\\", "/").strip("/").split("/")
        return parts[-2] if len(parts) >= 2 else "default_group"
    except Exception as e:
        logger.error(f"提取分组键错误: {e}, 路径: {image_path}")
        return "default_group"


def round_coordinate(value: float, decimals: int = 2) -> float:
    """Round a coordinate to the given number of decimal places.

    Uses the multiply-round-divide form to control the decimal count;
    None is mapped to 0.0.
    """
    if value is None:
        return 0.0
    factor = 10 ** decimals
    return round(value * factor) / factor


def process_road_points(road_points: List[Dict[str, float]]) -> List[Tuple[float, float]]:
    """Extract (x, y) tuples from raw road-point dicts, rounded to 2 decimals.

    Missing 'x'/'y' keys default to 0.0; an empty/None input yields [].
    """
    if not road_points:
        return []
    return [
        (round_coordinate(point.get('x', 0.0)), round_coordinate(point.get('y', 0.0)))
        for point in road_points
    ]


def calculate_overlap_ratio(
    points1: List[Tuple[float, float]],
    points2: List[Tuple[float, float]],
    tolerance: float = 0.01
) -> float:
    """Jaccard overlap (|intersection| / |union|) of two point sets.

    Bug fix: the previous version collapsed coordinates to whole integers with
    math.trunc, which discarded the 2-decimal rounding applied in
    process_road_points and left `tolerance` meaningless.  Points are now
    compared at 2-decimal precision, matching the default tolerance of 0.01
    (as requested by the note at the end of this file and the previously
    commented-out implementation).  Returns 0.0 when either set is empty.
    """
    if not points1 or not points2:
        return 0.0
    set1 = {(round(x, 2), round(y, 2)) for x, y in points1}
    set2 = {(round(x, 2), round(y, 2)) for x, y in points2}
    intersection = set1 & set2
    union = set1 | set2
    return len(intersection) / len(union) if union else 0.0


def load_data(input_file: str) -> Dict[str, Dict[str, List[FrameData]]]:
    """Load a JSONL file and group records as scene_id -> camera tag -> frames.

    Malformed lines are logged and skipped; an unreadable file yields {}.
    """
    grouped_data = defaultdict(lambda: defaultdict(list))
    total_records = 0
    logger.info(f"开始读取输入文件: {input_file}")
    try:
        with open(input_file, 'r', encoding='utf-8') as f:
            for line_num, line in enumerate(f, 1):
                try:
                    record = json.loads(line.strip())
                    scene_id = record.get('scene_id', 'unknown_scene')
                    image_path = record.get('image_path', '')
                    timestamp = record.get('timestamp', 0.0)
                    # Road points live under mop_pnc_info.roadPoints; tolerate
                    # a missing or non-dict container.
                    mop_info = record.get('mop_pnc_info', {})
                    if not isinstance(mop_info, dict):
                        mop_info = {}
                    road_points = mop_info.get('roadPoints', [])
                    processed_points = process_road_points(road_points)
                    group_key = extract_group_key(image_path)
                    frame_data = FrameData(
                        timestamp=timestamp,
                        road_points=processed_points,
                        record=record,
                        group_key=group_key,
                        scene_id=scene_id
                    )
                    grouped_data[scene_id][group_key].append(frame_data)
                    total_records += 1
                except json.JSONDecodeError as e:
                    logger.error(f"JSON解析错误 (行 {line_num}): {e}")
                except Exception as e:
                    logger.error(f"处理记录错误 (行 {line_num}): {e}")
    except IOError as e:
        logger.error(f"文件读取错误: {e}")
        return {}
    logger.info(f"成功读取 {total_records} 条记录,分组到 {len(grouped_data)} 个场景")
    return grouped_data


def process_frame_group(
    frames: List[FrameData],
    threshold: float = 0.7,
    debug: bool = False
) -> CameraStats:
    """Label all frames of one camera group (newest first).

    Rules: the newest frame is 'yes'; each later frame inherits the previous
    frame's mask when their road-point overlap >= threshold, otherwise it is
    'no'; once the first 'no' appears, every remaining frame is 'no'.
    Mutates frame.record['mask'] and frame.processed in place; sorts `frames`
    by timestamp descending as a side effect.
    """
    if not frames:
        return CameraStats(cam_tag="")
    cam_tag = frames[0].group_key
    cam_stats = CameraStats(cam_tag=cam_tag)
    cam_stats.total_frames = len(frames)
    # Newest timestamp first.
    frames.sort(key=lambda x: x.timestamp, reverse=True)
    # Becomes True at the first 'no'; from then on everything is 'no'.
    no_occurred = False
    for i, frame in enumerate(frames):
        if i == 0:
            # Newest frame: always 'yes'.
            frame.record['mask'] = 'yes'
            cam_stats.yes_count += 1
            if debug:
                logger.debug(f"相机 {cam_tag}: 帧0 (最新) 标签设置为 'yes'")
        elif i == 1:
            # Second frame: compare against the newest frame.
            overlap = calculate_overlap_ratio(frames[0].road_points, frame.road_points)
            if overlap >= threshold:
                frame.record['mask'] = 'yes'
                cam_stats.yes_count += 1
                if debug:
                    logger.debug(f"相机 {cam_tag}: 帧1 与帧0重叠度 {overlap:.2f} >= {threshold} -> 标签设置为 'yes'")
            else:
                frame.record['mask'] = 'no'
                cam_stats.no_count += 1
                no_occurred = True
                if debug:
                    logger.debug(f"相机 {cam_tag}: 帧1 与帧0重叠度 {overlap:.2f} < {threshold} -> 标签设置为 'no' (首次出现)")
        else:
            if no_occurred:
                # After the first 'no', every later frame is 'no'.
                frame.record['mask'] = 'no'
                cam_stats.no_count += 1
                if debug:
                    logger.debug(f"相机 {cam_tag}: 帧{i} 已出现no -> 标签设置为 'no'")
            else:
                overlap = calculate_overlap_ratio(frames[i - 1].road_points, frame.road_points)
                if overlap >= threshold:
                    # Inherit the previous frame's mask.  Bound to a local name
                    # first: nesting the same quote inside an f-string is a
                    # syntax error before Python 3.12.
                    inherited = frames[i - 1].record['mask']
                    frame.record['mask'] = inherited
                    if inherited == 'yes':
                        cam_stats.yes_count += 1
                    else:
                        cam_stats.no_count += 1
                    if debug:
                        logger.debug(f"相机 {cam_tag}: 帧{i} 与帧{i - 1}重叠度 {overlap:.2f} >= {threshold} -> 继承标签 '{inherited}'")
                else:
                    frame.record['mask'] = 'no'
                    cam_stats.no_count += 1
                    no_occurred = True
                    if debug:
                        logger.debug(f"相机 {cam_tag}: 帧{i} 与帧{i - 1}重叠度 {overlap:.2f} < {threshold} -> 标签设置为 'no' (首次出现)")
        frame.processed = True
    return cam_stats


def process_collision_data(
    input_file: str,
    output_file: str,
    stats_file: str,
    threshold: float = 0.7,
    debug: bool = False
) -> Dict[str, Any]:
    """Run the full pipeline: load, label per camera group, write data + stats.

    Returns the stats dict (also written to stats_file), or {} when no data
    could be loaded.
    """
    start_time = time.time()
    logger.info(f"开始处理碰撞数据: {input_file}")
    grouped_data = load_data(input_file)
    if not grouped_data:
        logger.error("未加载到有效数据,处理终止")
        return {}
    # Aggregate structures.
    overall_stats = OverallStats(total_scenes=len(grouped_data))
    scene_stats_dict = {}
    labeled_data = []
    for scene_id, cam_groups in grouped_data.items():
        scene_stats = SceneStats(
            scene_id=scene_id,
            total_cam_tags=len(cam_groups)
        )
        for group_key, frames in cam_groups.items():
            cam_stats = process_frame_group(frames, threshold, debug)
            scene_stats.cam_tags[group_key] = cam_stats
            scene_stats.total_frames += cam_stats.total_frames
            scene_stats.yes_count += cam_stats.yes_count
            scene_stats.no_count += cam_stats.no_count
            labeled_data.extend(frame.record for frame in frames if frame.processed)
        scene_stats_dict[scene_id] = scene_stats
        overall_stats.total_frames += scene_stats.total_frames
        overall_stats.yes_count += scene_stats.yes_count
        overall_stats.no_count += scene_stats.no_count
    # Sort by scene_id, camera tag (second-to-last path component), timestamp desc.
    labeled_data.sort(key=lambda x: (
        x.get('scene_id', ''),
        x.get('image_path', '').split('/')[-2] if 'image_path' in x else '',
        -x.get('timestamp', 0)
    ))
    stats = {
        "stats_file": os.path.basename(stats_file),
        "total_scenes": overall_stats.total_scenes,
        "scenes": {},
        "overall": {
            "total_frames": overall_stats.total_frames,
            "yes_count": overall_stats.yes_count,
            "no_count": overall_stats.no_count
        }
    }
    for scene_id, scene_stats in scene_stats_dict.items():
        stats["scenes"][scene_id] = {
            "scene_id": scene_stats.scene_id,
            "total_cam_tags": scene_stats.total_cam_tags,
            "total_frames": scene_stats.total_frames,
            "yes_count": scene_stats.yes_count,
            "no_count": scene_stats.no_count,
            "cam_tags": {}
        }
        for cam_tag, cam_stats in scene_stats.cam_tags.items():
            stats["scenes"][scene_id]["cam_tags"][cam_tag] = {
                "cam_tag": cam_stats.cam_tag,
                "total_frames": cam_stats.total_frames,
                "yes_count": cam_stats.yes_count,
                "no_count": cam_stats.no_count
            }
    # Ensure output directories exist ('.' for bare filenames).
    os.makedirs(os.path.dirname(output_file) or '.', exist_ok=True)
    os.makedirs(os.path.dirname(stats_file) or '.', exist_ok=True)
    logger.info(f"写入处理后的数据到: {output_file}")
    with jsonlines.open(output_file, 'w') as writer:
        for record in labeled_data:
            writer.write(record)
    logger.info(f"写入统计信息到: {stats_file}")
    with open(stats_file, 'w', encoding='utf-8') as f:
        json.dump(stats, f, ensure_ascii=False, indent=4)
    processing_time = time.time() - start_time
    logger.info(f"处理完成! 总耗时: {processing_time:.2f}秒")
    return stats


if __name__ == "__main__":
    # Filenames bound once so the summary below cannot drift out of sync with
    # the actual outputs (the previous version printed stale 106k_0723 names).
    output_file = "processed_42k_0726_tag03.jsonl"
    stats_file = "collision_stats_0726_tag03.json"
    stats = process_collision_data(
        # input_file="./basicData_historyDepth_v2_img106k_250723__huanshi_6ver_4fish_large_model_900_792874_10frames.jsonl",
        input_file="./basicData_realCollision_v2_img42k_250726__real_collision_dataset_pt1_to_8.jsonl",
        # input_file="./basicData_vggtHDepth_v2_img42k_250725__huanshi_6ver_6pinhole__large_model_900_792874_10frames__pt1__250722.jsonl",
        output_file=output_file,
        stats_file=stats_file,
        threshold=0.1,
        debug=False  # set True for detailed per-frame logs
    )
    if stats:
        total_frames = stats['overall']['total_frames']
        yes_count = stats['overall']['yes_count']
        no_count = stats['overall']['no_count']
        print("\n处理完成!")
        print(f"总场景数: {stats['total_scenes']}")
        print(f"总帧数: {total_frames}")
        if total_frames:
            print(f"YES标签数: {yes_count} ({yes_count / total_frames:.2%})")
            print(f"NO标签数: {no_count} ({no_count / total_frames:.2%})")
        else:
            # Guard: avoid ZeroDivisionError when nothing was labeled.
            print(f"YES标签数: {yes_count}")
            print(f"NO标签数: {no_count}")
        print(f"输出文件: {output_file}")
        print(f"统计文件: {stats_file}")
    else:
        print("处理失败,请检查日志文件获取详细信息")

# TODO(review): original source note — "keep roadPoint values at 2 decimal
# places"; implemented in calculate_overlap_ratio above.
08-01
import json
import os
from math import trunc  # kept for backward compatibility; no longer used (see calculate_overlap_ratio)

import jsonlines
from collections import defaultdict
from typing import Dict, List, Any, Tuple
import logging
import time
from dataclasses import dataclass, field

# Logging configuration (comment markers were lost in the pasted original).
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
    handlers=[
        logging.FileHandler("collision_processing.log"),
        logging.StreamHandler()
    ]
)
logger = logging.getLogger("CollisionProcessor")


# Data classes.
@dataclass
class FrameData:
    """One input record together with its pre-processed road points."""
    timestamp: float
    road_points: List[Tuple[float, float]]
    record: Dict[str, Any]
    group_key: str = ""
    scene_id: str = ""
    processed: bool = False


@dataclass
class CameraStats:
    """Label counters for one camera group."""
    cam_tag: str
    total_frames: int = 0
    yes_count: int = 0
    no_count: int = 0


@dataclass
class SceneStats:
    """Per-scene aggregation over its camera groups."""
    scene_id: str
    total_cam_tags: int = 0
    total_frames: int = 0
    yes_count: int = 0
    no_count: int = 0
    cam_tags: Dict[str, CameraStats] = field(default_factory=dict)


@dataclass
class OverallStats:
    """Whole-run totals."""
    total_scenes: int = 0
    total_frames: int = 0
    yes_count: int = 0
    no_count: int = 0


def extract_group_key(image_path: str) -> str:
    """Return the camera tag: the second-to-last '/'-separated path component.

    Note: the pasted original lost the backslash escape; the intent is to
    normalize Windows separators before splitting.
    """
    try:
        parts = image_path.replace("\\", "/").strip("/").split("/")
        return parts[-2] if len(parts) >= 2 else "default_group"
    except Exception as e:
        logger.error(f"提取分组键错误: {e}, 路径: {image_path}")
        return "default_group"


def round_coordinate(value: float, decimals: int = 2) -> float:
    """Round a coordinate to the given number of decimal places; None -> 0.0."""
    if value is None:
        return 0.0
    factor = 10 ** decimals
    return round(value * factor) / factor


def process_road_points(road_points: List[Dict[str, float]]) -> List[Tuple[float, float]]:
    """Extract (x, y) tuples from raw road-point dicts, rounded to 2 decimals."""
    if not road_points:
        return []
    return [
        (round_coordinate(point.get('x', 0.0)), round_coordinate(point.get('y', 0.0)))
        for point in road_points
    ]


def calculate_overlap_ratio(
    points1: List[Tuple[float, float]],
    points2: List[Tuple[float, float]],
    tolerance: float = 0.01
) -> float:
    """Jaccard overlap (|intersection| / |union|) of two point sets.

    Bug fix: math.trunc collapsed coordinates to whole integers, discarding
    the 2-decimal rounding from process_road_points and ignoring `tolerance`.
    Per the requirement stated in this file (value tolerance 0.01), points
    are compared at 2-decimal precision instead.
    """
    if not points1 or not points2:
        return 0.0
    set1 = {(round(x, 2), round(y, 2)) for x, y in points1}
    set2 = {(round(x, 2), round(y, 2)) for x, y in points2}
    intersection = set1 & set2
    union = set1 | set2
    return len(intersection) / len(union) if union else 0.0


def load_data(input_file: str) -> Dict[str, Dict[str, List[FrameData]]]:
    """Load a JSONL file and group records as scene_id -> camera tag -> frames."""
    grouped_data = defaultdict(lambda: defaultdict(list))
    total_records = 0
    logger.info(f"开始读取输入文件: {input_file}")
    try:
        with open(input_file, 'r', encoding='utf-8') as f:
            for line_num, line in enumerate(f, 1):
                try:
                    record = json.loads(line.strip())
                    scene_id = record.get('scene_id', 'unknown_scene')
                    image_path = record.get('image_path', '')
                    timestamp = record.get('timestamp', 0.0)
                    # Road points live under mop_pnc_info.roadPoints.
                    mop_info = record.get('mop_pnc_info', {})
                    if not isinstance(mop_info, dict):
                        mop_info = {}
                    road_points = mop_info.get('roadPoints', [])
                    processed_points = process_road_points(road_points)
                    group_key = extract_group_key(image_path)
                    frame_data = FrameData(
                        timestamp=timestamp,
                        road_points=processed_points,
                        record=record,
                        group_key=group_key,
                        scene_id=scene_id
                    )
                    grouped_data[scene_id][group_key].append(frame_data)
                    total_records += 1
                except json.JSONDecodeError as e:
                    logger.error(f"JSON解析错误 (行 {line_num}): {e}")
                except Exception as e:
                    logger.error(f"处理记录错误 (行 {line_num}): {e}")
    except IOError as e:
        logger.error(f"文件读取错误: {e}")
        return {}
    logger.info(f"成功读取 {total_records} 条记录,分组到 {len(grouped_data)} 个场景")
    return grouped_data


def process_frame_group(
    frames: List[FrameData],
    threshold: float = 0.7,
    debug: bool = False
) -> CameraStats:
    """Label one camera group: newest frame 'yes'; later frames inherit the
    previous mask when overlap >= threshold, else 'no'; after the first 'no'
    all remaining frames are 'no'.  Mutates records and sorts `frames` in place.
    """
    if not frames:
        return CameraStats(cam_tag="")
    cam_tag = frames[0].group_key
    cam_stats = CameraStats(cam_tag=cam_tag)
    cam_stats.total_frames = len(frames)
    # Newest timestamp first.
    frames.sort(key=lambda x: x.timestamp, reverse=True)
    no_occurred = False
    for i, frame in enumerate(frames):
        if i == 0:
            frame.record['mask'] = 'yes'
            cam_stats.yes_count += 1
            if debug:
                logger.debug(f"相机 {cam_tag}: 帧0 (最新) 标签设置为 'yes'")
        elif i == 1:
            overlap = calculate_overlap_ratio(frames[0].road_points, frame.road_points)
            if overlap >= threshold:
                frame.record['mask'] = 'yes'
                cam_stats.yes_count += 1
                if debug:
                    logger.debug(f"相机 {cam_tag}: 帧1 与帧0重叠度 {overlap:.2f} >= {threshold} -> 标签设置为 'yes'")
            else:
                frame.record['mask'] = 'no'
                cam_stats.no_count += 1
                no_occurred = True
                if debug:
                    logger.debug(f"相机 {cam_tag}: 帧1 与帧0重叠度 {overlap:.2f} < {threshold} -> 标签设置为 'no' (首次出现)")
        else:
            if no_occurred:
                frame.record['mask'] = 'no'
                cam_stats.no_count += 1
                if debug:
                    logger.debug(f"相机 {cam_tag}: 帧{i} 已出现no -> 标签设置为 'no'")
            else:
                overlap = calculate_overlap_ratio(frames[i - 1].road_points, frame.road_points)
                if overlap >= threshold:
                    # Local name avoids nesting the same quote in an f-string
                    # (syntax error before Python 3.12).
                    inherited = frames[i - 1].record['mask']
                    frame.record['mask'] = inherited
                    if inherited == 'yes':
                        cam_stats.yes_count += 1
                    else:
                        cam_stats.no_count += 1
                    if debug:
                        logger.debug(f"相机 {cam_tag}: 帧{i} 与帧{i - 1}重叠度 {overlap:.2f} >= {threshold} -> 继承标签 '{inherited}'")
                else:
                    frame.record['mask'] = 'no'
                    cam_stats.no_count += 1
                    no_occurred = True
                    if debug:
                        logger.debug(f"相机 {cam_tag}: 帧{i} 与帧{i - 1}重叠度 {overlap:.2f} < {threshold} -> 标签设置为 'no' (首次出现)")
        frame.processed = True
    return cam_stats


def process_collision_data(
    input_file: str,
    output_file: str,
    stats_file: str,
    threshold: float = 0.7,
    debug: bool = False
) -> Dict[str, Any]:
    """Run the full pipeline: load, label per camera group, write data + stats."""
    start_time = time.time()
    logger.info(f"开始处理碰撞数据: {input_file}")
    grouped_data = load_data(input_file)
    if not grouped_data:
        logger.error("未加载到有效数据,处理终止")
        return {}
    overall_stats = OverallStats(total_scenes=len(grouped_data))
    scene_stats_dict = {}
    labeled_data = []
    for scene_id, cam_groups in grouped_data.items():
        scene_stats = SceneStats(
            scene_id=scene_id,
            total_cam_tags=len(cam_groups)
        )
        for group_key, frames in cam_groups.items():
            cam_stats = process_frame_group(frames, threshold, debug)
            scene_stats.cam_tags[group_key] = cam_stats
            scene_stats.total_frames += cam_stats.total_frames
            scene_stats.yes_count += cam_stats.yes_count
            scene_stats.no_count += cam_stats.no_count
            labeled_data.extend(frame.record for frame in frames if frame.processed)
        scene_stats_dict[scene_id] = scene_stats
        overall_stats.total_frames += scene_stats.total_frames
        overall_stats.yes_count += scene_stats.yes_count
        overall_stats.no_count += scene_stats.no_count
    # Sort by scene_id, camera tag, timestamp descending.
    labeled_data.sort(key=lambda x: (
        x.get('scene_id', ''),
        x.get('image_path', '').split('/')[-2] if 'image_path' in x else '',
        -x.get('timestamp', 0)
    ))
    stats = {
        "stats_file": os.path.basename(stats_file),
        "total_scenes": overall_stats.total_scenes,
        "scenes": {},
        "overall": {
            "total_frames": overall_stats.total_frames,
            "yes_count": overall_stats.yes_count,
            "no_count": overall_stats.no_count
        }
    }
    for scene_id, scene_stats in scene_stats_dict.items():
        stats["scenes"][scene_id] = {
            "scene_id": scene_stats.scene_id,
            "total_cam_tags": scene_stats.total_cam_tags,
            "total_frames": scene_stats.total_frames,
            "yes_count": scene_stats.yes_count,
            "no_count": scene_stats.no_count,
            "cam_tags": {}
        }
        for cam_tag, cam_stats in scene_stats.cam_tags.items():
            stats["scenes"][scene_id]["cam_tags"][cam_tag] = {
                "cam_tag": cam_stats.cam_tag,
                "total_frames": cam_stats.total_frames,
                "yes_count": cam_stats.yes_count,
                "no_count": cam_stats.no_count
            }
    os.makedirs(os.path.dirname(output_file) or '.', exist_ok=True)
    os.makedirs(os.path.dirname(stats_file) or '.', exist_ok=True)
    logger.info(f"写入处理后的数据到: {output_file}")
    with jsonlines.open(output_file, 'w') as writer:
        for record in labeled_data:
            writer.write(record)
    logger.info(f"写入统计信息到: {stats_file}")
    with open(stats_file, 'w', encoding='utf-8') as f:
        json.dump(stats, f, ensure_ascii=False, indent=4)
    processing_time = time.time() - start_time
    logger.info(f"处理完成! 总耗时: {processing_time:.2f}秒")
    return stats


if __name__ == "__main__":  # pasted original had lost the dunders ("if name == main")
    stats = process_collision_data(
        # input_file="./basicData_historyDepth_v2_img106k_250723__huanshi_6ver_4fish_large_model_900_792874_10frames.jsonl",
        input_file="./basicData_realCollision_v2_img42k_250726__real_collision_dataset_pt1_to_8.jsonl",
        # input_file="./basicData_vggtHDepth_v2_img42k_250725__huanshi_6ver_6pinhole__large_model_900_792874_10frames__pt1__250722.jsonl",
        output_file="processed_42k_0726_tag03.jsonl",
        stats_file="collision_stats_0726_tag03.json",
        threshold=0.1,
        debug=False
    )

# NOTE(review): requirement stated after this code in the source page —
# group by scene_id, then by the second-to-last '/'-component of image_path;
# sort each group by timestamp descending; first frame gets mask="yes";
# each later frame compares its roadPoints against the previous frame's with
# a value tolerance of 0.01 and inherits the previous mask when
# intersection/union > threshold, else gets "no"; after the first "no", all
# remaining frames in the group are "no".  Implemented above.
08-01
评论 1
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值