np.newaxis and np.power

This article covers two commonly used NumPy operations: np.power computes element-wise powers and works between two arrays or between an array and a scalar; np.newaxis adds a dimension to an array. Examples illustrate how each is used in different situations.


1. np.power

numpy.power(x1, x2)

Raises each element of x1 to the corresponding power, element-wise. x2 may be a scalar or an array; when it is an array, its shape must be broadcast-compatible with x1 (in the examples below, the two share the same number of columns).

>>> x1 = list(range(6))
>>> x1
[0, 1, 2, 3, 4, 5]
>>> np.power(x1, 3)
array([  0,   1,   8,  27,  64, 125])
>>> x2 = [1.0, 2.0, 3.0, 3.0, 2.0, 1.0]
>>> np.power(x1, x2)
array([  0.,   1.,   8.,  27.,  16.,   5.])
>>> x2 = np.array([[1, 2, 3, 3, 2, 1], [1, 2, 3, 3, 2, 1]])
>>> x2
array([[1, 2, 3, 3, 2, 1],
       [1, 2, 3, 3, 2, 1]])
>>> np.power(x1, x2)
array([[ 0,  1,  8, 27, 16,  5],
       [ 0,  1,  8, 27, 16,  5]])
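
Broadcasting also lets a column of exponents apply across each row, and integer arrays raised to negative integer powers raise an error, which is a common gotcha. A minimal sketch of both points (variable names are illustrative):

import numpy as np

base = np.arange(6)                      # array([0, 1, 2, 3, 4, 5])

# A (2, 1) column of exponents broadcasts against the (6,) base,
# giving a (2, 6) result: one row per exponent.
exps = np.array([[2], [3]])
print(np.power(base, exps))
# [[  0   1   4   9  16  25]
#  [  0   1   8  27  64 125]]

# Gotcha: integer arrays cannot be raised to negative integer powers.
try:
    np.power(base, -1)
except ValueError as err:
    print(err)

# Casting to float first (or using a float exponent) works;
# 0.0 ** -1.0 yields inf with a divide-by-zero warning.
print(np.power(base.astype(float), -1.0))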

2. np.newaxis

np.newaxis inserts a new axis, increasing the dimensionality of an array by one.

>>> np.linspace(1, 10, 10)
array([ 1.,  2.,  3.,  4.,  5.,  6.,  7.,  8.,  9., 10.])
>>> np.linspace(1, 10, 10)[np.newaxis, :]
array([[ 1.,  2.,  3.,  4.,  5.,  6.,  7.,  8.,  9., 10.]])
>>> np.linspace(1, 10, 10)[:, np.newaxis]
array([[ 1.],
       [ 2.],
       [ 3.],
       [ 4.],
       [ 5.],
       [ 6.],
       [ 7.],
       [ 8.],
       [ 9.],
       [10.]])

In [4]: np.linspace(1, 10, 10).shape
Out[4]: (10,)      # 1-D array

In [5]: np.linspace(1, 10, 10)[np.newaxis, :].shape
Out[5]: (1, 10)    # row vector (2-D)

In [6]: np.linspace(1, 10, 10)[:, np.newaxis].shape
Out[6]: (10, 1)    # column vector (2-D)
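
Beyond reshaping a vector into a row or column, np.newaxis is commonly used to line up two 1-D arrays for broadcasting. A minimal sketch (array names are illustrative):

import numpy as np

a = np.linspace(1, 5, 5)           # shape (5,)
b = np.array([10.0, 20.0, 30.0])   # shape (3,)

# a[:, np.newaxis] has shape (5, 1) and b[np.newaxis, :] has shape (1, 3);
# broadcasting the two produces a (5, 3) table.
outer = a[:, np.newaxis] * b[np.newaxis, :]
print(outer.shape)                 # (5, 3)

# The same pattern gives pairwise differences a_i - b_j.
diffs = a[:, np.newaxis] - b[np.newaxis, :]
print(diffs)

# np.newaxis is simply an alias for None, so a[:, None] is equivalent.
assert np.array_equal(a[:, np.newaxis], a[:, None])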