import pickle
import numpy as np
import torch
import math
import scipy.io as sio
import time
time_start = time.time()
num_select = 1000
num_ratio = 0.8
num_train = int(num_select * num_ratio)
num_test = int(num_select - num_train)
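# with num_select = 1000 and num_ratio = 0.8: 800 training and 200 test samples per class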
# alpha fixed, sweep GSNR
num_GSNR = int(45)
num_alpha = int(1)
# GSNR fixed, sweep alpha (alternative configuration)
# num_GSNR = int(1)
# num_alpha = int(12)
type_modulation1 = ['16QAM', '2PSK', '4PAM', '4PSK', '64QAM', '8PSK', 'AM_DSB', 'AM_SSB', 'FM', 'GMSK', 'MSK']
type_modulation2 = []
type_modulation = type_modulation1+type_modulation2
num_modulation = len(type_modulation)
# confusion counts: [true modulation, predicted modulation, GSNR index, alpha index]
pred_mat = np.zeros([num_modulation, num_modulation, num_GSNR, num_alpha])
IMG_WIDTH = IMG_HEIGHT = 36
IMG_DEPTH = 1
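# each sample is expected to be a flattened 36 * 36 * 1 = 1296-element vector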
def whitening_image(image_np):
    '''
    Performs per-image whitening.
    :param image_np: a 4D numpy array representing a batch of images
    :return: the whitened image numpy array
    '''
    for i in range(len(image_np)):
        mean = np.mean(image_np[i, ...])
        # Use the adjusted standard deviation to guard against std == 0
        std = np.max([np.std(image_np[i, ...]), 1.0 / np.sqrt(IMG_HEIGHT * IMG_WIDTH * IMG_DEPTH)])
        image_np[i, ...] = (image_np[i, ...] - mean) / std
    return image_np
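# Note: this mirrors TensorFlow's tf.image.per_image_standardization, which likewise
# clamps the std at 1/sqrt(num_pixels) so that constant images do not divide by zero.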
def _read_one_batch(path, is_random_label):
    '''
    The data may be split across several batch files; this function takes the path of
    one batch file and returns the images, the corresponding labels, and the filenames
    as numpy arrays.
    :param path: the path of one batch of data
    :param is_random_label: whether to replace the true labels with random ones
    :return: image, label, and filename numpy arrays
    '''
    with open(path, 'rb') as fo:
        dicts = pickle.load(fo, encoding='iso-8859-1')  # deserialize the file into a Python dict
    data = dicts[b'data']
    filenames = dicts[b'filename']
    if is_random_label is False:
        label = np.array(dicts[b'labels'])
    else:
        labels = np.random.randint(low=0, high=10, size=10000)  # random integer labels; batch size hard-coded to 10000
        label = np.array(labels)
    return data, label, filenames
def read_in_all_images(address_list, shuffle = True, is_random_label = False):
"""
This function reads all training or validation data, shuffles them if needed, and returns the
images and the corresponding labels as numpy arrays
:param address_list: a list of paths of cPickle files
:return: concatenated numpy array of data and labels. Data are in 4D arrays: [num_images,
image_height, image_width, image_depth] and labels are in 1D arrays: [num_images]
"""
data = np.array([]).reshape([0, IMG_WIDTH * IMG_HEIGHT * IMG_DEPTH])
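    # the (0, 1296) seed array fixes the expected column count; batches with a different
    # width will make np.concatenate fail (see the traceback at the end of this post)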
label = np.array([])
filenames = np.array([])
for address in address_list:
print('Reading images from ' + address)
batch_data, batch_label, batch_filenames = _read_one_batch(address, is_random_label)
# Concatenate along axis 0 by default
        data = np.concatenate((data, batch_data))
label = np.concatenate((label, batch_label))
filenames = np.concatenate((filenames, batch_filenames))
num_data = len(label)
# This reshape order is really important. Don't change
# Reshape is correct. Double checked
    data = data.reshape((num_data, IMG_HEIGHT * IMG_WIDTH, IMG_DEPTH), order='F')  # Fortran (column-major) order
data = data.reshape((num_data, IMG_HEIGHT, IMG_WIDTH, IMG_DEPTH))
    if shuffle:
        print('Shuffling')
        order = np.random.permutation(num_data)  # random permutation of the sample indices
        data = data[order, ...]
        label = label[order]
        filenames = filenames[order]
    data = data.astype(np.float32)  # convert to float32
return data, label, filenames
def get_acc(output, label):
    total = output.shape[0]  # batch size
    _, pred_label = output.max(1)  # index of the per-row maximum, i.e. the predicted class
    num_correct = (pred_label == label).sum().item()  # number of correct predictions
    return num_correct / total  # accuracy
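# Example (illustrative values): out = torch.tensor([[0.1, 0.9], [0.8, 0.2]]),
# label = torch.tensor([1, 0]) -> pred_label = [1, 0] -> accuracy 1.0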
test_data, test_label, test_filename = read_in_all_images([r"D:\shixun\data_train_test\data_test1"], shuffle=False)
model = torch.load(
'./model/model0.4090909090909091.pkl',
map_location=torch.device('cpu'),
    weights_only=False  # allow unpickling the full custom model class, not just a state_dict
)
print('load best model successfully')
test_acc = 0
test_batch_size = 256
model = model.eval()
num_test_data = len(test_data)
num_batch_test = math.ceil(num_test_data / test_batch_size)  # round up to cover a partial final batch
all_pred_labels = []  # collect every batch's predictions for the confusion matrix below
for cnt_test_batch in range(num_batch_test):
    test_batch_data = test_data[cnt_test_batch * test_batch_size:(cnt_test_batch + 1) * test_batch_size, ...]
    test_batch_data = whitening_image(test_batch_data)
    test_batch_tensor = torch.from_numpy(test_batch_data)  # convert to a tensor
    test_data_tensor_cov = test_batch_tensor.permute(0, 3, 1, 2)  # NHWC -> NCHW
    test_batch_label = test_label[cnt_test_batch * test_batch_size:(cnt_test_batch + 1) * test_batch_size, ...]
    test_label_tensor = torch.from_numpy(test_batch_label).long()  # labels arrive as float64 after concatenate; cast for comparison
    # torch.autograd.Variable is deprecated since PyTorch 0.4; plain tensors suffice
    if torch.cuda.is_available():
        test_data_tensor_cov = test_data_tensor_cov.cuda()  # (bs, 1, h, w)
        test_label_tensor = test_label_tensor.cuda()  # (bs,)
    with torch.no_grad():  # inference only; no gradients needed
        out = model(test_data_tensor_cov)
    test_acc += get_acc(out, test_label_tensor) * test_label_tensor.shape[0]  # weight by batch size; the final batch may be smaller
    _, batch_pred = out.max(1)
    all_pred_labels.append(batch_pred.cpu().numpy())
all_pred_labels = np.concatenate(all_pred_labels)  # one prediction per test sample, aligned with test_filename
for cnt_modulation in range(num_modulation):
    print('cnt_modulation={}'.format(int(cnt_modulation)))
    # path_tal = '/Test_Data_'+str(num_select)+'/'+type_modulation[cnt_modulation]
    # path_test = path_hed + path_mid + path_tal + '/'
    # str() of a stored filename entry looks like: b"['SR_CONST_16QAM alpha = 15 GSNR = -20.mat']"
    filename_hed = 'b\"[\'SR_CONST_' + type_modulation[cnt_modulation]
for GSNR in range(-20, 22, 2):
print('GSNR={}'.format(int(GSNR)))
cnt_GSNR = GSNR + 20
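        # GSNR steps by 2, so only even GSNR-axis indices (0, 2, ..., 40) are filled;
        # num_GSNR = 45 leaves the odd indices and 41-44 at zero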
for alpha in range(15, 16, 1):
print('alpha={}'.format(int(alpha)))
cnt_alpha = alpha - 15
filename_mid = ' alpha = ' + str(int(alpha)) + ' GSNR = ' + str(int(GSNR))
# for cnt_cycle in range(1, num_test + 1, 1):
filename_tal = '.mat\']\"'
filename_test = filename_hed + filename_mid + filename_tal
            for i in range(num_test_data):  # scan every test sample, not just the last batch
                filename = str(test_filename[i])
                if filename == filename_test:
                    true_label = int(test_label[i])  # should equal cnt_modulation
                    pre_label = int(all_pred_labels[i])
                    # print(filename, true_label, pre_label)
                    pred_mat[cnt_modulation, pre_label, cnt_GSNR, cnt_alpha] += 1
print('Accuracy of the test set: %f%%' % (test_acc / num_test_data * 100))
sio.savemat('CONST2_3.mat', {'CONST2_3': pred_mat})
print('The confusion matrix has been successfully generated!')
time_end = time.time()
print('time cost', time_end-time_start, 's')
Console output:
C:\Users\Lenovo\PyCharmMiscProject\.venv\Scripts\python.exe "D:\shixun\confusion matrix.py"
Reading images from D:\shixun\data_train_test\data_test1
Traceback (most recent call last):
File "D:\shixun\confusion matrix.py", line 118, in <module>
test_data, test_label, test_filename = read_in_all_images([r"D:\shixun\data_train_test\data_test1"], shuffle=False)
~~~~~~~~~~~~~~~~~~^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "D:\shixun\confusion matrix.py", line 91, in read_in_all_images
data = np.concatenate((data, batch_data)) # numpy库数组拼接
ValueError: all the input array dimensions except for the concatenation axis must match exactly, but along dimension 1, the array at index 0 has size 1296 and the array at index 1 has size 10000
How can the code be fixed?
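The ValueError comes from read_in_all_images: the accumulator is seeded with shape (0, IMG_WIDTH * IMG_HEIGHT * IMG_DEPTH) = (0, 1296), but the batch loaded from D:\shixun\data_train_test\data_test1 has 10000 columns along dimension 1, so np.concatenate refuses to stack them. Either the pickle stores the images at a size other than 36x36x1, or the data matrix was saved transposed as (features, samples). Below is a minimal sketch of a corrected reader under those assumptions; the helper name read_in_all_images_fixed and the transposed-layout guess are mine, not from the original post.

import numpy as np

def read_in_all_images_fixed(address_list, shuffle=True, is_random_label=False):
    '''Same job as read_in_all_images, but validates each batch's shape first.'''
    expected = IMG_HEIGHT * IMG_WIDTH * IMG_DEPTH  # 36 * 36 * 1 = 1296
    data_list, label_list, filename_list = [], [], []
    for address in address_list:
        print('Reading images from ' + address)
        batch_data, batch_label, batch_filenames = _read_one_batch(address, is_random_label)
        batch_data = np.asarray(batch_data)
        if batch_data.ndim == 2 and batch_data.shape[1] != expected:
            if batch_data.shape[0] == expected:
                # Assumption: the pickle stores (features, samples); flip to (samples, features)
                batch_data = batch_data.T
            else:
                raise ValueError(
                    '%s stores vectors of length %d, but IMG_HEIGHT * IMG_WIDTH * IMG_DEPTH'
                    ' = %d; regenerate the pickle or update the image constants'
                    % (address, batch_data.shape[1], expected))
        data_list.append(batch_data)
        label_list.append(np.asarray(batch_label))
        filename_list.append(np.asarray(batch_filenames))
    data = np.concatenate(data_list)  # building from lists avoids the mismatched (0, 1296) seed array
    label = np.concatenate(label_list)
    filenames = np.concatenate(filename_list)
    num_data = len(label)
    # Same two-step reshape as the original; the Fortran order must not change
    data = data.reshape((num_data, IMG_HEIGHT * IMG_WIDTH, IMG_DEPTH), order='F')
    data = data.reshape((num_data, IMG_HEIGHT, IMG_WIDTH, IMG_DEPTH))
    if shuffle:
        order = np.random.permutation(num_data)
        data, label, filenames = data[order, ...], label[order], filenames[order]
    return data.astype(np.float32), label, filenames

If the stored vectors really are 10000 elements long (for example 100x100 images), the fix is instead to regenerate the test pickle at 36x36x1, or to update IMG_WIDTH, IMG_HEIGHT, and the model's expected input size to match; the transpose guard alone will not help in that case.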