How can I add dimension checks to the following code? (A shape-checking sketch follows the function below.)

import os
import time

import numpy as np

# `load` (the .mat-file loader) and `nearest_neighbor_interpolation_single_pilot_01`
# are assumed to be imported from the project's own modules.


def Data_load_SUMIMO_train(Config):
    print('Loading the received frequency-domain symbols:')
    x12_data = []
    x13_data = []
    for i, data_path in enumerate(Config.data_path_list):
        relative_path = data_path + '/RxFreqData'  # './' is the current directory, '../' the parent directory
        path_list = sorted(os.listdir(relative_path))  # all files/dirs under the path; sorted() keeps the ordering consistent across runs
        print(path_list[Config.segment_num[i][0]:Config.segment_num[i][1]])
        x11_data = []
        for file in path_list[Config.segment_num[i][0]:Config.segment_num[i][1]]:
            file_path = os.path.join(relative_path, file)
            feature_data = load(file_path)  # load the .mat file; feature_data is a dict
            x1 = np.asarray(feature_data['group_freqdata'], dtype=np.complex64)  # extract the array stored under the key 'group_freqdata'
            x11_data.extend(x1)  # accumulate the training data for this measurement point
        train = x11_data[:int(len(x11_data) * 0.9)]
        val = x11_data[int(len(x11_data) * 0.9):]
        print('Number of training samples under path {}: {}'.format(i, len(train)))
        print('Number of validation samples under path {}: {}'.format(i, len(val)))
        x12_data.extend(train)
        x13_data.extend(val)
    train_Feature_part1 = np.asarray(x12_data)  # first block of training features
    val_Feature_part1 = np.asarray(x13_data)  # first block of validation features
    print('Total number of training samples: {}'.format(len(train_Feature_part1)))
    print('Total number of validation samples: {}'.format(len(val_Feature_part1)))
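    # Added dimension check: the axis=3 concatenation later on assumes a 4-D layout
    # (assumed (TTI, S, F, Nr)); print the shapes here to confirm.
    print('train_Feature_part1 shape:', train_Feature_part1.shape)
    print('val_Feature_part1 shape:', val_Feature_part1.shape)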
    print('Loading the pilot-based LS estimates at the receiver:')
    x32_data = []
    x33_data = []
    for i, data_path in enumerate(Config.data_path_list):
        relative_path = data_path + '/LsEstimation'  # './' is the current directory, '../' the parent directory
        path_list = sorted(os.listdir(relative_path))  # all files/dirs under the path; sorted() keeps the ordering consistent across runs
        print(path_list[Config.segment_num[i][0]:Config.segment_num[i][1]])
        x31_data = []
        for file in path_list[Config.segment_num[i][0]:Config.segment_num[i][1]]:
            file_path = os.path.join(relative_path, file)
            feature_data = load(file_path)  # load the .mat file; feature_data is a dict
            x3 = np.asarray(feature_data['group_lsresult'], dtype=np.complex64)  # extract the array stored under the key 'group_lsresult'
            x31_data.extend(x3)  # x31_data is a Python list
        train = x31_data[:int(len(x31_data) * 0.9)]
        val = x31_data[int(len(x31_data) * 0.9):]
        x32_data.extend(train)
        x33_data.extend(val)
    train_Feature_part3 = np.asarray(x32_data)  # second block of training features
    val_Feature_part3 = np.asarray(x33_data)  # second block of validation features
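    # Added dimension check: the interpolation loops below unpack five values from .shape,
    # so the LS-estimate arrays must be 5-D (assumed layout (TTI, S, F, Nr, Nt)).
    print('train_Feature_part3 shape:', train_Feature_part3.shape)
    print('val_Feature_part3 shape:', val_Feature_part3.shape)
    assert train_Feature_part3.ndim == 5, 'expected a 5-D LS-estimate array'
    assert val_Feature_part3.ndim == 5, 'expected a 5-D LS-estimate array'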
    print('--- Interpolating ---')
    start_time = time.perf_counter()
    # ----------------- interpolate the training set -----------------
    rows, _, _, _, layer_num = train_Feature_part3.shape
    for i in range(rows):
        for j in range(layer_num):
            array = train_Feature_part3[i, :, :, :, j]  # full array is (TTI, S, F, Nr, Nt); this slice is (S, F, Nr)
            train_Feature_part3[i, :, :, :, j] = nearest_neighbor_interpolation_single_pilot_01(array)
    # ----------------- interpolate the validation set -----------------
    rows, _, _, _, layer_num = val_Feature_part3.shape
    for i in range(rows):
        for j in range(layer_num):
            array = val_Feature_part3[i, :, :, :, j]  # full array is (TTI, S, F, Nr, Nt); this slice is (S, F, Nr)
            val_Feature_part3[i, :, :, :, j] = nearest_neighbor_interpolation_single_pilot_01(array)
    end_time = time.perf_counter()
    print('Interpolation took {} min'.format((end_time - start_time) / 60))
    print('--- Interpolation done ---')
    print('Loading the labels:')
    x42_data = []
    x43_data = []
    for i, data_path in enumerate(Config.data_path_list):
        relative_path = data_path + '/Label'  # './' is the current directory, '../' the parent directory
        path_list = sorted(os.listdir(relative_path))  # all files/dirs under the path; sorted() keeps the ordering consistent across runs
        print(path_list[Config.segment_num[i][0]:Config.segment_num[i][1]])
        x41_data = []
        for file in path_list[Config.segment_num[i][0]:Config.segment_num[i][1]]:
            file_path = os.path.join(relative_path, file)
            feature_data = load(file_path)  # load the .mat file; feature_data is a dict
            x4 = feature_data['group_txcwbits']  # extract the array stored under the key 'group_txcwbits'
            x41_data.extend(x4)
        train = x41_data[:int(len(x41_data) * 0.9)]
        val = x41_data[int(len(x41_data) * 0.9):]
        x42_data.extend(train)
        x43_data.extend(val)
    y_train = np.asarray(x42_data, dtype=np.float32)  # training labels
    y_val = np.asarray(x43_data, dtype=np.float32)  # validation labels
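    # Added dimension check: the label layout is not stated in the original code, so
    # print it here and verify it matches what the loss function expects.
    print('y_train shape:', y_train.shape)
    print('y_val shape:', y_val.shape)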
    train_Feature_part3 = train_Feature_part3.reshape(train_Feature_part3.shape[0], train_Feature_part3.shape[1],
                                                      train_Feature_part3.shape[2],
                                                      train_Feature_part3.shape[3] * train_Feature_part3.shape[4])
    print('train_Feature_part3 shape after reshape: {}'.format(train_Feature_part3.shape))
    val_Feature_part3 = val_Feature_part3.reshape(val_Feature_part3.shape[0], val_Feature_part3.shape[1],
                                                  val_Feature_part3.shape[2],
                                                  val_Feature_part3.shape[3] * val_Feature_part3.shape[4])
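    # Added dimension check: np.concatenate along axis=3 requires the first three
    # dimensions (TTI, S, F) of part1 and part3 to match.
    assert train_Feature_part1.shape[:3] == train_Feature_part3.shape[:3], 'train part1/part3 leading dims differ'
    assert val_Feature_part1.shape[:3] == val_Feature_part3.shape[:3], 'val part1/part3 leading dims differ'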
    x_train = np.concatenate((train_Feature_part1, train_Feature_part3), axis=3)  # (TTI, S, F, 12)
    x_val = np.concatenate((val_Feature_part1, val_Feature_part3), axis=3)  # (TTI, S, F, 12)
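    # Added dimension check: features and labels must agree on the sample count.
    print('x_train shape:', x_train.shape, 'y_train shape:', y_train.shape)
    print('x_val shape:', x_val.shape, 'y_val shape:', y_val.shape)
    assert x_train.shape[0] == y_train.shape[0], 'train feature/label sample mismatch'
    assert x_val.shape[0] == y_val.shape[0], 'val feature/label sample mismatch'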
    return x_train, y_train, x_val, y_val
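
If you prefer to keep the checks in one place, a minimal sketch is shown below, assuming the layouts noted in the comments above ((TTI, S, F, Nr) for the frequency-domain symbols and (TTI, S, F, Nr, Nt) for the LS estimates). `check_dims` and its arguments are hypothetical names introduced here for illustration, not part of the original code:

def check_dims(name, array, expected_ndim, expected_trailing=None):
    """Print the shape of `array` and fail fast when it does not match the expectation."""
    print('{}: shape {}'.format(name, array.shape))
    if array.ndim != expected_ndim:
        raise ValueError('{}: expected {} dims, got {}'.format(name, expected_ndim, array.ndim))
    if expected_trailing is not None and array.shape[-len(expected_trailing):] != tuple(expected_trailing):
        raise ValueError('{}: expected trailing dims {}, got {}'.format(
            name, tuple(expected_trailing), array.shape[-len(expected_trailing):]))

# Example usage inside Data_load_SUMIMO_train (trailing dims omitted because Nr/Nt are not known here):
# check_dims('train_Feature_part1', train_Feature_part1, expected_ndim=4)
# check_dims('train_Feature_part3', train_Feature_part3, expected_ndim=5)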