A simple comparison of np.save and sio.savemat

This post uses a simple example to compare how Python and MATLAB store data. In particular, with the `.mat` format, MATLAB handles one-dimensional arrays differently from Python, so an array's dimensionality may change when the data is loaded back.


# coding=gbk
import numpy as np
import scipy.io as sio  # for reading/writing .mat files

# Compare the two save methods.
# Observation: the same data, once stored in a .mat file and loaded back, comes out as a
# 2-D array, because MATLAB does not distinguish between shapes (4,) and (1, 4),
# while Python (NumPy) does.
# Conclusion: keep this in mind when exchanging data through .mat files.
arr1 = np.array([1, 2, 3, 4])
arr2 = np.array([3, 4, 5, 6])

sio.savemat('test.mat', {'a': arr1})

a = sio.loadmat('test.mat')
a = a['a']

np.save('test.npy', arr2)
b = np.load('test.npy', mmap_mode='r')  # 'r' opens the .npy file as a read-only memory map

print(a, b)
print(a.shape, b.shape)

Result:

[[1 2 3 4]] [3 4 5 6]
(1, 4) (4,)
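
If you want the array to come back from the `.mat` file with its original 1-D shape, `scipy.io.loadmat` accepts a `squeeze_me=True` option, and `scipy.io.savemat` lets you choose how 1-D arrays are promoted via `oned_as`. A minimal sketch of both options, reusing the `test.mat` file name from above:

```python
import numpy as np
import scipy.io as sio

arr = np.array([1, 2, 3, 4])  # shape (4,)

# savemat promotes 1-D arrays to 2-D; oned_as controls the orientation
# ('row' -> (1, 4), 'column' -> (4, 1)). 'row' is the default.
sio.savemat('test.mat', {'a': arr}, oned_as='row')

# Option 1: squeeze unit dimensions while loading.
a = sio.loadmat('test.mat', squeeze_me=True)['a']
print(a.shape)   # (4,)

# Option 2: load as-is and squeeze afterwards.
a2 = np.squeeze(sio.loadmat('test.mat')['a'])
print(a2.shape)  # (4,)
```

Either way, the round trip through `.mat` no longer silently turns a `(4,)` array into a `(1, 4)` one, which is the pitfall the comparison above illustrates.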