list.append()与list.extend()

本文详细介绍了Python中列表的两种常见操作:append与extend的区别。通过具体的示例代码展示了如何使用这两种方法来修改列表,并解释了它们在处理不同类型的输入时的行为差异。

摘要生成于 C知道 ,由 DeepSeek-R1 满血版支持, 前往体验 >

alist=[1,2] >>>[1,2]
alist.append([3,4]) >>>[1, 2, [3, 4]]
alist.extend([3,4]) >>>[1, 2, 3, 4]
结论:
list.append(arg1) 参数类型任意,可以往已有列表中添加元素;若添加的是列表,该列表会被当作一个元素整体存入原列表末尾,只使 list 长度增加 1。

list.extend(list1) 参数必须是列表类型,可以将参数中的列表合并到原列表的末尾,使原来的 list长度增加len(list1)。

import sys sys.path.append('/data/coding') # 添加包所在的根目录到 Python 路径中,防止找不到 tensor2robot 和 robotics_transformer python data_base.py import tensorflow as tf import numpy as np import tf_agents from tf_agents.networks import sequential from keras.layers import Dense from tf_agents.agents.dqn import dqn_agent from tf_agents.utils import common from typing import Type from tf_agents.networks import network from tensor2robot.utils import tensorspec_utils from tf_agents.specs import tensor_spec from robotics_transformer import sequence_agent from tf_agents.trajectories import time_step as ts from tensorflow_datasets.core.data_sources import array_record import tensorflow_datasets as tfds from robotics_transformer import transformer_network from robotics_transformer import transformer_network_test_set_up import collections from tf_agents.replay_buffers import reverb_replay_buffer from tf_agents.replay_buffers import reverb_utils import reverb import cv2 from scipy.spatial.transform import Rotation as R # import matplotlib.pyplot as plt """ 对句子编码,并保存,后续直接取出 """ instruction_datasets_path = './datasets/instruction' # savedmodel_path = "./universal_sentence_encoder/policy" # 句子编码器 # # 待编码的指令 # instruction_abdomen = tf.constant(['move to abdomen'], dtype=tf.string) # instruction_neck = tf.constant(['move to neck'], dtype=tf.string) # saved_policy = tf.compat.v2.saved_model.load(savedmodel_path) # sentence_encoder = saved_policy.signatures['serving_default'] # embedding_abdomen = sentence_encoder( # inputs = instruction_abdomen, # ) # embedding_neck = sentence_encoder( # inputs = instruction_neck, # ) # embedding_abdomen = embedding_abdomen['outputs'].numpy()[0] # embedding_neck = embedding_neck['outputs'].numpy()[0] # embedding_abdomen = tf.convert_to_tensor(embedding_abdomen, dtype=tf.float32) # embedding_neck = tf.convert_to_tensor(embedding_neck, dtype=tf.float32) # embedding_abdomen = tf.expand_dims(embedding_abdomen, axis=0) # embedding_neck = tf.expand_dims(embedding_neck, 
axis=0) # instruction_data = { # "move_to_abdomen" : embedding_abdomen, # "move_to_neck" : embedding_neck, # } # instruction_dataset = tf.data.Dataset.from_tensor_slices(instruction_data) # 在数据集中被重新切分为 BATCH 个元素 # # 保存指令编码 # instruction_dataset.save(instruction_datasets_path) # 取出保存好的指令 load_instruction_datasets = tf.data.Dataset.load(instruction_datasets_path) # 加载 # 创建迭代器 load_instruction_iterator = iter(load_instruction_datasets) sample_instruction_data = next(load_instruction_iterator) # 腹部指令 natural_language_embedding_abdomen = sample_instruction_data['move_to_abdomen'].numpy() natural_language_instruction_abdomen = b'move to abdomen' # print(natural_language_embedding_abdomen) # 颈部指令 #natural_language_embedding_neck = sample_instruction_data['move_to_neck'].numpy() #natural_language_instruction_neck = b'move to neck' # print(natural_language_embedding_neck) """ 工具坐标系齐次矩阵 """ def calculate_Tt0(q_input): # 关节角度校准 q_input[5]+=3.141592653 # D-H参数 alpla_array = np.array([3.14159, 1.57077, -1.57106, 1.57077, -1.57296, -1.56984, -1.57347], dtype=np.float64) # 单根连杆扭角α d_array = np.array([-0.2985, -0.000211357, -0.459573, -0.000396759, -0.457368, -0.00569921, -0.1059], dtype=np.float64) # 杆和杆之间的距离d a_array = np.array([0, -0.000770217, 0.000258101, 0.0659865, -0.0529691, 5.09759e-05, 0.0770263], dtype=np.float64) # 单根连杆长度a # 初始化位姿变换矩阵数组 T = np.empty(7, dtype=object) # 给位姿变换矩阵赋值 for i in range(7): T[i] = np.array([ [np.cos(q_input[i]), -np.sin(q_input[i]), 0, a_array[i]], [np.sin(q_input[i]) * np.cos(alpla_array[i]), np.cos(q_input[i]) * np.cos(alpla_array[i]), -np.sin(alpla_array[i]), np.sin(alpla_array[i]) * (-d_array[i])], [np.sin(q_input[i]) * np.sin(alpla_array[i]), np.cos(q_input[i]) * np.sin(alpla_array[i]), np.cos(alpla_array[i]), d_array[i] * np.cos(alpla_array[i])], [0, 0, 0, 1] ]) # 工具坐标系旋转矩阵 Tt7 = np.array([ [1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, -0.215], [0, 0, 0, 1] ]) # 计算末端执行器的位姿变换矩阵 T70 T10 = T[0] T20 = T10.dot(T[1]) T30 = T20.dot(T[2]) T40 = 
T30.dot(T[3]) T50 = T40.dot(T[4]) T60 = T50.dot(T[5]) T70 = T60.dot(T[6]) # 计算工具坐标系的位姿变换矩阵 Tt0 # Tt0 = Tt7.dot(T70) Tt0 = T70.dot(Tt7) return Tt0 """ 取数据 """ my_dir_path = "/data/coding/colour1/1" # 收集的数据路径 image_list = [] # image_aligned_depth_list = [] #对齐后的深度图像 # 移除深度图像相关 natural_language_embedding_list = [] natural_language_instruction_list = [] base_displacement_vector_list = [] base_displacement_vertical_rotation_list = [] gripper_closedness_action_list = [] rotation_delta_list = [] terminate_episode_list = [] world_vector_list = [] discounted_return_list = [] return_list = [] reward_list = [] step_id_list = [] element_index_list = [] num_steps_list = [] is_first_list = [] is_last_list = [] step_type_list = [] next_step_type_list = [] image_ts = [] # 移除 image_aligned_depth_ts # image_aligned_depth_ts = [] #对齐后的深度图像 natural_language_embedding_ts = [] natural_language_instruction_ts = [] base_displacement_vector_ts = [] base_displacement_vertical_rotation_ts = [] gripper_closedness_action_ts = [] rotation_delta_ts = [] terminate_episode_ts = [] world_vector_ts = [] discounted_return_ts = [] return_ts = [] reward_ts = [] step_id_ts = [] element_index_ts = [] num_steps_ts = [] is_first_ts = [] is_last_ts = [] step_type_ts = [] next_step_type_ts = [] DATASETS_SLICES = 2250 # 一个子数据集中大概含有多少数据片段 slices_num = 0 # 片段计数 abdomen_num = 0 # 各指令取了多少图片 #neck_num = 0 show_list = [] # 给我看的,取了哪些元素 show_list.append(-1) datasets_path = "/data/coding/moxing/1" # 保存路径 # 以外层循环控制取这一次是取一个腹部事件还是颈部事件 abdomen_index = 1# 从第几个腹部事件开始取 #neck_index = 13 # 从第几个颈部事件开始取 element_index = 0 # 总事件索引 while element_index < 120: # 一次腹部一次颈部 switch_path = f"{my_dir_path}/abdomen_{abdomen_index}" # 双数位为腹部 switch_natural_language_embedding = natural_language_embedding_abdomen switch_natural_language_instruction = natural_language_instruction_abdomen element = f'abdomen_{abdomen_index}' # # 连续取两次腹部之后,取一次颈部 # if element_index % 3 == 1 or element_index % 3 == 2: # switch_path = 
f"{my_dir_path}/abdomen_{abdomen_index}" # switch_natural_language_embedding = natural_language_embedding_abdomen # switch_natural_language_instruction = natural_language_instruction_abdomen # element = f'abdomen_{abdomen_index}' # elif element_index % 3 == 0: # switch_path = f"{my_dir_path}/neck_{neck_index}" # switch_natural_language_embedding = natural_language_embedding_neck # switch_natural_language_instruction = natural_language_instruction_neck # element = f'neck_{neck_index}' # 打开位置数据文件 with open(f'{my_dir_path}/robot_data.txt', 'r') as file: # 逐行读取文件内容 lines = file.readlines() # 创建一个空列表用于存储数据 data_list = [] # 遍历每一行数据 for line in lines: # 使用逗号分割每行数据,并去除末尾的换行符 line_data = line.strip().split(',') # 将分割后的数据转换成浮点数,并添加到数据列表中 data_list.append([float(x) for x in line_data[1:]]) # 做差算0.4s内的变化量(基坐标系描述) d_data_list = [] pos_index = 0 while pos_index < len(data_list)-2: d_data_list.append([]) for i in range(6): d_data_list[int(pos_index/2)].append(data_list[pos_index+2][i] - data_list[pos_index][i]) # 数据集中暂定每一步间隔0.4s # print(int(pos_index/2)) pos_index+=2 # """ # 使用旋转矩阵算0.4s内的变化量(工具坐标系描述) # """ # d_data_list = [] # pos_index = 0 # while pos_index < len(data_list)-2: # d_data_list.append([]) # q_now = np.array([data_list[pos_index][7], data_list[pos_index][8], data_list[pos_index][9], data_list[pos_index][10], data_list[pos_index][11], # data_list[pos_index][12], data_list[pos_index][13]], dtype=np.float64) # q_after = np.array([data_list[pos_index+2][7], data_list[pos_index+2][8], data_list[pos_index+2][9], data_list[pos_index+2][10], data_list[pos_index+2][11], # data_list[pos_index+2][12], data_list[pos_index+2][13]], dtype=np.float64) # # 调用函数并获取工具坐标系的位姿变换矩阵 # T_now = calculate_Tt0(q_now) # T_after = calculate_Tt0(q_after) # # 未来工具坐标系相对于当前坐标系的齐次矩阵 # T_change = T_after.dot(np.linalg.inv(T_now) ) # # 提取旋转矩阵 # R_change = T_change[:3, :3] # # print(Rba) # # 创建一个Rotation对象 # rot = R.from_matrix(R_change) # # 计算欧拉角,单位为弧度 # euler_angles_radians = rot.as_euler('xyz') # # 
print("Euler angles (radians):", euler_angles_radians[0], euler_angles_radians[1], euler_angles_radians[2]) # # 移动改变量转动改变量赋值 # d_data_list[int(pos_index/2)].append(T_change[0][3]) # d_data_list[int(pos_index/2)].append(T_change[1][3]) # d_data_list[int(pos_index/2)].append(T_change[2][3]) # d_data_list[int(pos_index/2)].append(euler_angles_radians[0]) # d_data_list[int(pos_index/2)].append(euler_angles_radians[1]) # d_data_list[int(pos_index/2)].append(euler_angles_radians[2]) # # print("d_data_list:") # # print(d_data_list) # pos_index+=2 # print(len(d_data_list)) # print(len(d_data_list[0])) # print(d_data_list) # # 数据由0.4s内变化的量计算相应的速度 m/s rad/s # d_data_list_8 = [] # for i in range(len(d_data_list)): # d_data_list_8.append([]) # for j in range(6): # d_data_list_8[i].append(d_data_list[i][j]/0.4) # 就用0.4s内变化的量代替速度,防止除0.4之后将误差放大. 给机械臂运行之前需要除0.4,因为speedL需要的是m/s和rad/s d_data_list_8 = [] for i in range(len(d_data_list)): d_data_list_8.append([]) for j in range(6): d_data_list_8[i].append(d_data_list[i][j]/1) # 保留小数点后8位 for i in range(len(d_data_list_8)): for j in range(6): d_data_list_8[i][j] = round(d_data_list_8[i][j], 8) # print('------------------------------------------------------') # print(d_data_list_8) # print('------------------------------------------------------') """ 找出一次事件中的起始步和终止步 """ start_step = 0 end_step = 0 # 取起始步 for i in range(len(d_data_list_8)): if((start_step == 0) and ( abs(d_data_list_8[i][0]) > 0.001 or abs(d_data_list_8[i][1]) > 0.001 or abs(d_data_list_8[i][2]) > 0.001)): start_step = i elif((start_step == 0) and ( abs(d_data_list_8[i][3]) > 0.001 or abs(d_data_list_8[i][4]) > 0.001 or abs(d_data_list_8[i][5]) > 0.001)): start_step = i if((start_step != 0) and (end_step == 0) and ( abs(d_data_list_8[i][0]) < 0.0001 and abs(d_data_list_8[i][1]) < 0.0001 and abs(d_data_list_8[i][2]) < 0.0001) and ( abs(d_data_list_8[i][3]) < 0.0001 and abs(d_data_list_8[i][4]) < 0.0001 and abs(d_data_list_8[i][5]) < 0.0001)): end_step = i # 
为了完整的加速过程,起始步前移一步 if(start_step >=1): start_step = start_step - 1 print(f'当前事件路径 {switch_path}') print(f'事件起始步序号 {start_step*2}') print(f'事件结束步序号 {end_step*2}') # input("检查始末两步是否正确,按下回车键开始...") """ 取出每一步的数据充入列表 """ print(switch_path) step_index = 0 d_data_list_8_index = start_step # 从起始步开始索引 # while step_index < end_step - start_step +1: while step_index < end_step - start_step +2: # reward之后多加一步 # print(f"事件: {element} slices_num: {slices_num} 指令: {switch_natural_language_instruction} 当前图片序号:{d_data_list_8_index*2}") if(show_list[-1] != element): show_list.append(element) if(step_index == 0): is_first_list.append(True) else: is_first_list.append(False) # if(step_index == end_step - start_step): if(step_index == end_step - start_step + 1): # reward之后多加一步 is_last_list.append(True) else: is_last_list.append(False) if (step_index == 0): step_type_list.append(tf_agents.trajectories.StepType.FIRST) next_step_type_list.append(tf_agents.trajectories.StepType.MID) # elif (step_index == end_step - start_step-1): elif (step_index == end_step - start_step): # reward之后多加一步 step_type_list.append(tf_agents.trajectories.StepType.MID) next_step_type_list.append(tf_agents.trajectories.StepType.LAST) # elif (step_index == end_step - start_step): elif (step_index == end_step - start_step + 1): # reward之后多加一步 step_type_list.append(tf_agents.trajectories.StepType.LAST) next_step_type_list.append(tf_agents.trajectories.StepType.FIRST) else: step_type_list.append(tf_agents.trajectories.StepType.MID) next_step_type_list.append(tf_agents.trajectories.StepType.MID) # 各指令数据个数 switch_natural_language_instruction == b'move to abdomen' abdomen_num+=1 """ 加载图像并处理 """ # 读取深度图像 # depth_image_path = f'{switch_path}/aligned_depth_image_{d_data_list_8_index*2}.png' # depth_image = cv2.imread(depth_image_path, cv2.IMREAD_UNCHANGED) # 调整大小为 (256, 320) # resized_depth_image = cv2.resize(depth_image, (320, 256), interpolation=cv2.INTER_NEAREST) # 归一化 # normalized_depth_image = resized_depth_image / 
65535.0 # 假设深度图像的像素值范围为 [0, 65535] # 将图像转换为TensorFlow格式 # depth_image_tensor = tf.convert_to_tensor(normalized_depth_image, dtype=tf.float32) # 在最后一个维度上添加一个维度,使其变为 (256, 320, 1) # depth_image_tensor = tf.expand_dims(depth_image_tensor, axis=-1) rgb_image_path = f'{my_dir_path}/cam_high{d_data_list_8_index*2}.png' rgb_image = cv2.imread(rgb_image_path) # # 检查事件结束图像 # if(step_index == end_step - start_step +1): # print(rgb_image_path) # exit() # 调整大小为 (256, 320) resized_rgb_image = cv2.resize(rgb_image, (320, 256), interpolation=cv2.INTER_NEAREST) # 归一化 normalized_rgb_image = resized_rgb_image / 255.0 # 假设深度图像的像素值范围为 [0, 65535] # 将图像转换为TensorFlow格式 rgb_image_tensor = tf.convert_to_tensor(normalized_rgb_image, dtype=tf.float32) # 打印图像张量的形状 # print(rgb_image_tensor) # print("Tensor shape:", rgb_image_tensor.shape) # # 显示图像 # cv2.imshow('Resized Depth Image', rgb_image) # cv2.waitKey(0) # cv2.destroyAllWindows() if (step_index == end_step - start_step): # 事件最后一步的处理 terminate_episode = tf.constant([1, 0], dtype=tf.int32) reward = tf.constant(1.0, dtype=tf.float32) rotation_delta = tf.constant([0.0, 0.0, 0.0], dtype=tf.float32) world_vector = tf.constant([0.0, 0.0, 0.0], dtype=tf.float32) elif(step_index == end_step - start_step + 1): # reward之后多加一步 terminate_episode = tf.constant([1, 0], dtype=tf.int32) reward = tf.constant(0.0, dtype=tf.float32) rotation_delta = tf.constant([0.0, 0.0, 0.0], dtype=tf.float32) world_vector = tf.constant([0.0, 0.0, 0.0], dtype=tf.float32) else: terminate_episode = tf.constant([0, 1], dtype=tf.int32) reward = tf.constant(0.0, dtype=tf.float32) # print(f'{d_data_list_8[d_data_list_8_index][0]}, {d_data_list_8[d_data_list_8_index][1]}, {d_data_list_8[d_data_list_8_index][2]}, {d_data_list_8[d_data_list_8_index][3]}, {d_data_list_8[d_data_list_8_index][4]}, {d_data_list_8[d_data_list_8_index][5]}, ') # 实际机械臂0.4s内的最大移动量为正负0.012, 假设移动的最大变化量为正负0.05m/s, 将其扩大到正负1的尺度上时,需要x20 world_vector = tf.constant([d_data_list_8[d_data_list_8_index][0]*20, 
d_data_list_8[d_data_list_8_index][1]*20, d_data_list_8[d_data_list_8_index][2]*20], dtype=tf.float32) # 实际机械臂0.4s内的最大转动量为正负0.041, 假设转动的最大变化量为正负0.079rad/s, 将其扩大到正负1.57的尺度上时,需要x20 rotation_delta = tf.constant([d_data_list_8[d_data_list_8_index][3]*20, d_data_list_8[d_data_list_8_index][4]*20, d_data_list_8[d_data_list_8_index][5]*20], dtype=tf.float32) print(f'{element} {d_data_list_8[d_data_list_8_index][0]}, {d_data_list_8[d_data_list_8_index][1]}, {d_data_list_8[d_data_list_8_index][2]}, {d_data_list_8[d_data_list_8_index][3]}, {d_data_list_8[d_data_list_8_index][4]}, {d_data_list_8[d_data_list_8_index][5]}, ') if(abs(d_data_list_8[d_data_list_8_index][0]) > 0.05 or abs(d_data_list_8[d_data_list_8_index][1]) > 0.05 or abs(d_data_list_8[d_data_list_8_index][2]) > 0.05): input("移动动作超限, 请检查...") elif(abs(d_data_list_8[d_data_list_8_index][3]) > 0.078 or abs(d_data_list_8[d_data_list_8_index][4]) > 0.078 or abs(d_data_list_8[d_data_list_8_index][5]) > 0.078): input("转动动作超限, 请检查...") # 夹爪动作全程为0,不做任何控制 gripper_closedness_action = tf.constant([0.0], dtype=tf.float32) # print(rotation_delta) # print(f'rotation_delta.shape = {rotation_delta.shape}') # print(world_vector) # print(f'world_vector.shape = {world_vector.shape}') # 充入data_list image_list.append(rgb_image_tensor) # image_aligned_depth_list.append(depth_image_tensor) # 移除深度图像相关 natural_language_embedding_list.append(tf.convert_to_tensor(switch_natural_language_embedding)) natural_language_instruction_list.append(tf.convert_to_tensor(switch_natural_language_instruction)) base_displacement_vector_list.append(tf.constant([0.0, 0.0], dtype=tf.float32)) base_displacement_vertical_rotation_list.append(tf.constant([0.0], dtype=tf.float32)) # gripper_closedness_action_list.append(tf.constant([0.0], dtype=tf.float32)) gripper_closedness_action_list.append(gripper_closedness_action) rotation_delta_list.append(rotation_delta) terminate_episode_list.append(terminate_episode) world_vector_list.append(world_vector) 
discounted_return_list.append(tf.constant(1.0, dtype=tf.float32)) # 图方便,反正训练的时候不用 return_list.append(tf.constant(1.0, dtype=tf.float32)) reward_list.append(reward) step_id_list.append(tf.constant(step_index, dtype=tf.int32)) element_index_list.append(tf.constant(element, dtype=tf.string)) # num_steps_list.append(tf.constant(end_step - start_step + 1, dtype=tf.int32)) num_steps_list.append(tf.constant(end_step - start_step + 2, dtype=tf.int32)) # reward之后多加一步 slices_num+=1 # 片段计数 step_index+=1 d_data_list_8_index+=1 # # 数据检查 # print('------------------------------------------------------') # world_vector_ = [None] * len(world_vector_list) # for i in range(len(world_vector_list)): # world_vector_[i] = world_vector_list[i].numpy().tolist() # print(f'{world_vector_}\n') # print('------------------------------------------------------') # rotation_delta_ = [None] * len(rotation_delta_list) # for i in range(len(rotation_delta_list)): # rotation_delta_[i] = rotation_delta_list[i].numpy().tolist() # print(f'{rotation_delta_}\n') # print('------------------------------------------------------') if slices_num > DATASETS_SLICES or abdomen_index > 123 : break # 跳出外层循环 # 一次腹部一次颈部 abdomen_index+=1 # # 连续取两次腹部之后,取一次颈部 # if element_index % 3 == 1 or element_index % 3 == 2: # abdomen_index+=1 # elif element_index % 3 == 0: # neck_index+=1 element_index+=1 image_ts = tf.stack(image_list, axis=0) # image_aligned_depth_ts = tf.stack(image_aligned_depth_list, axis=0) # 移除深度图像相关 natural_language_embedding_ts = tf.stack(natural_language_embedding_list, axis=0) natural_language_instruction_ts = tf.stack(natural_language_instruction_list, axis=0) base_displacement_vector_ts = tf.stack(base_displacement_vector_list, axis=0) base_displacement_vertical_rotation_ts = tf.stack(base_displacement_vertical_rotation_list, axis=0) gripper_closedness_action_ts = tf.stack(gripper_closedness_action_list, axis=0) rotation_delta_ts = tf.stack(rotation_delta_list, axis=0) terminate_episode_ts = 
tf.stack(terminate_episode_list, axis=0) world_vector_ts = tf.stack(world_vector_list, axis=0) discounted_return_ts = tf.stack(discounted_return_list, axis=0) return_ts = tf.stack(return_list, axis=0) reward_ts = tf.stack(reward_list, axis=0) step_id_ts = tf.stack(step_id_list, axis=0) element_index_ts = tf.stack(element_index_list, axis=0) num_steps_ts = tf.stack(num_steps_list, axis=0) is_first_ts = tf.stack(is_first_list, axis=0) is_last_ts = tf.stack(is_last_list, axis=0) step_type_ts = tf.stack(step_type_list, axis=0) next_step_type_ts = tf.stack(next_step_type_list, axis=0) # 所有数据存入一个字典 data = { "image" : image_ts, # "image_aligned_depth" : image_aligned_depth_ts, # 移除深度图像相关 "natural_language_embedding" : natural_language_embedding_ts, "natural_language_instruction" : natural_language_instruction_ts, "base_displacement_vector" : base_displacement_vector_ts, "base_displacement_vertical_rotation" : base_displacement_vertical_rotation_ts, "gripper_closedness_action" : gripper_closedness_action_ts, "rotation_delta" : rotation_delta_ts, "terminate_episode" : terminate_episode_ts, "world_vector" : world_vector_ts, "discounted_return" : discounted_return_ts, "return" : return_ts, "reward" : reward_ts, "step_id" : step_id_ts, "element_index" : element_index_ts, "num_steps" : num_steps_ts, "is_first" : is_first_ts, "is_last" : is_last_ts, "step_type" : step_type_ts, "next_step_type" : next_step_type_ts, } dataset = tf.data.Dataset.from_tensor_slices(data) # 在数据集中被重新切分为 BATCH 个元素 print("!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!") print(show_list) print(f"abdomen_num:{abdomen_num} ") # # 保存 dataset.save(datasets_path)根据此代码画出流程图
06-01
评论 3
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值