import numpy as np
import torch
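# NOTE (editorial summary): this script appears to optimise a set of complex
# mode coefficients k (stored as real/imag pairs) so that the intensity of the
# superposed field, sampled at a list of preset coordinates, matches a target
# pattern (bright centre point, dark elsewhere). PyTorch autograd supplies the
# gradient of the mismatch with respect to k.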
def def_k_coefficient(k_shape):
    # Random initial guess for the mode coefficients: one (real, imag) pair per mode.
    k_coefficient_real_img = np.random.rand(k_shape, 2)
    return k_coefficient_real_img
def trans_3_27_to_esum(all_point, mode_temp01_tensor, mode_temp_tensor):
    # Reduce the per-point field (components x points x re/im) to a per-point
    # energy sum. Both buffers are expected to come in zero-initialised, as in
    # the inline copy of these loops in __main__ (this function itself is not called there).
    mode_fengliang, number_of_points, shibu_xubu = all_point.shape
    for a1 in range(number_of_points):
        for a2 in range(mode_fengliang):
            # As written this combines only index 0 (the real part) of all_point;
            # with a zero-initialised buffer it reduces to |real part|.
            mode_temp01_tensor[a2, a1, 0] = torch.sqrt(
                (mode_temp01_tensor[a2, a1, 0]) ** 2 + (all_point[a2, a1, 0]) ** 2)
    for a1 in range(number_of_points):
        for a3 in range(mode_fengliang):
            # Sum the squared magnitudes over the field components.
            mode_temp_tensor[0, a1] = mode_temp_tensor[0, a1] + (mode_temp01_tensor[a3, a1, 0]) ** 2
    mode_temp_tensor = mode_temp_tensor.view(number_of_points, 1)
    return mode_temp_tensor
def distanceFrom_A_To_B(array1, array2):
    # Squared Euclidean distance between two column vectors of shape (rows, 1).
    rows, _ = array1.shape
    total = torch.tensor(0.0, dtype=torch.float64)
    for row in range(rows):
        diff = array1[row, 0] - array2[row, 0]
        total = total + diff ** 2
    return total
def def_the_want_point_value(preSETpoint_number=27, middle_point_intensity=1):
    # Target intensity vector: only the first (centre) point is bright.
    # Not used by __main__ below, which builds its target via zuobiaoGet instead.
    temp = np.zeros(shape=(preSETpoint_number, 1))
    temp[0, 0] = middle_point_intensity
    wanted_point = np.resize(temp, preSETpoint_number)
    print(wanted_point)
    return wanted_point
def zuobiaoGet(middle_point, zhongxingdianqiangdu, banfenkuang_int):
    # Build the list of sample coordinates (zuobiao) and the matching target
    # intensities (result_wanted): the centre pixel gets intensity
    # zhongxingdianqiangdu, and a run of pixels offset from it along one image
    # axis gets 0.
    zuobiao = [(middle_point, middle_point)]
    result_wanted = [zhongxingdianqiangdu]
    for i in range(banfenkuang_int, 3 * banfenkuang_int):
        result_wanted.append(0.0)
        zuobiao_x = middle_point
        zuobiao_y = middle_point + i
        zuobiao.append((zuobiao_x, zuobiao_y))
    print(zuobiao)
    length = len(result_wanted)
    result_wanted = np.array(result_wanted)
    result_wanted = np.resize(result_wanted, new_shape=(length, 1))
    return zuobiao, result_wanted
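# Worked example for the values used in __main__ (jingdu = 801, so
# middle_point = 400 and banfenkuang_int = int(801 * 0.33 / 10) = 26):
# zuobiaoGet returns the centre pixel (400, 400) plus the 52 pixels
# (400, 426) ... (400, 477), 53 sample points in total, with result_wanted a
# (53, 1) array that is 1 at the centre and 0 everywhere else.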
def read_Data(file_place='../mode_numpy//80mode.npy'):
    # Load the pre-sampled complex mode data and split it into a real/imag
    # last axis: (mode_number, mode_fenliang, numberofPreset, 2).
    all_point = np.load(file_place)
    mode_number, mode_fenliang, numberofPreset = all_point.shape
    all_point_real_img = np.zeros((mode_number, mode_fenliang, numberofPreset, 2), dtype=float)
    for a1 in range(mode_number):
        for a2 in range(mode_fenliang):
            for a3 in range(numberofPreset):
                all_point_real_img[a1, a2, a3, 0] = np.real(all_point[a1, a2, a3])
                all_point_real_img[a1, a2, a3, 1] = np.imag(all_point[a1, a2, a3])
    return all_point_real_img
def readOriginData(file_place, numberofPreset, zuobiao, mode_number, mode_fenliang):
    # Load the raw mode export, rebuild the complex field components on the
    # jingdu x jingdu grid, sample them at the zuobiao coordinates, and return
    # the samples with a real/imag last axis.
    mode_orign = np.load(file_place)
    a, b = mode_orign.shape
    jingdu = int((max(a, b)) ** 0.5)
    mode_fushu = np.zeros(shape=(mode_number, mode_fenliang, jingdu * jingdu), dtype=complex)
    for col in range(jingdu ** 2):
        for i in range(mode_number):
            # Each mode appears to occupy 12 columns of the export; the first
            # six hold Ex, Ey, Ez as interleaved (real, imag) pairs.
            ex_start = i * 12 + 0
            ey_start = i * 12 + 2
            ez_start = i * 12 + 4
            mode_fushu[i, 0, col] = mode_orign[col, ex_start] + 1j * mode_orign[col, ex_start + 1]
            mode_fushu[i, 1, col] = mode_orign[col, ey_start] + 1j * mode_orign[col, ey_start + 1]
            mode_fushu[i, 2, col] = mode_orign[col, ez_start] + 1j * mode_orign[col, ez_start + 1]
    all_point = np.zeros((mode_number, mode_fenliang, numberofPreset), dtype=complex)
    for number in range(mode_number):
        for fenliang in range(mode_fenliang):
            # Reshape the flat field back onto the jingdu x jingdu grid and
            # sample it at the preset coordinates.
            temp_mode = np.resize(mode_fushu[number, fenliang, :], new_shape=(jingdu, jingdu))
            numberPoint = 0
            for x, y in zuobiao:
                all_point[number, fenliang, numberPoint] = temp_mode[int(x), int(y)]
                numberPoint = numberPoint + 1
    all_point_real_img = np.zeros((mode_number, mode_fenliang, numberofPreset, 2), dtype=float)
    for a1 in range(mode_number):
        for a2 in range(mode_fenliang):
            for a3 in range(numberofPreset):
                all_point_real_img[a1, a2, a3, 0] = np.real(all_point[a1, a2, a3])
                all_point_real_img[a1, a2, a3, 1] = np.imag(all_point[a1, a2, a3])
    return all_point_real_img
def k_x_point(k_coefficient_real_img_tensor, point_real_img_tensor, result_fushu_tensor):
    # Superpose the modes: multiply each mode's sampled field by its complex
    # coefficient and accumulate, using (a + ib)(c + id) = (ac - bd) + i(ad + bc)
    # on the explicit real/imag channels. Not called by __main__, which repeats
    # these loops inline with .clone() calls.
    mode_number, mode_fengliang, numbers_point, shibu_xubu = point_real_img_tensor.shape
    for a1 in range(mode_fengliang):
        for a2 in range(numbers_point):
            for a3 in range(mode_number):
                # Real part of the accumulated field.
                result_fushu_tensor[a1, a2, 0] = result_fushu_tensor[a1, a2, 0] \
                    + point_real_img_tensor[a3, a1, a2, 0] * k_coefficient_real_img_tensor[a3, 0] \
                    - point_real_img_tensor[a3, a1, a2, 1] * k_coefficient_real_img_tensor[a3, 1]
                # Imaginary part of the accumulated field.
                result_fushu_tensor[a1, a2, 1] = result_fushu_tensor[a1, a2, 1] \
                    + point_real_img_tensor[a3, a1, a2, 1] * k_coefficient_real_img_tensor[a3, 0] \
                    + point_real_img_tensor[a3, a1, a2, 0] * k_coefficient_real_img_tensor[a3, 1]
    return result_fushu_tensor
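def superpose_and_intensity(k_coeff, point_real_img_tensor):
    # Editorial sketch (a hypothetical helper, not called anywhere): a
    # vectorized equivalent of the superposition + intensity loops in __main__,
    # assuming the same layouts: k_coeff is (mode_number, 2) and
    # point_real_img_tensor is (mode_number, mode_fengliang, n_points, 2),
    # last axis = (real, imag). Note it uses the full modulus |E|^2 per
    # component, whereas the loops below, as written, combine only the real part.
    k = torch.view_as_complex(k_coeff.contiguous())                  # (mode_number,)
    p = torch.view_as_complex(point_real_img_tensor.contiguous())    # (mode_number, fengliang, n_points)
    field = (k[:, None, None] * p).sum(dim=0)                        # (fengliang, n_points)
    intensity = (field.abs() ** 2).sum(dim=0)                        # (n_points,)
    return intensity.view(-1, 1)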
if __name__ == '__main__':
    # Random initial complex coefficients for the 40 modes; gradients are
    # taken with respect to this tensor.
    k_shape = 40
    k_coefficient_real_img = def_k_coefficient(k_shape=k_shape)
    k_coefficient_real_img_tensor = torch.from_numpy(k_coefficient_real_img)
    k_coefficient_real_img_tensor.requires_grad_(True)
    mode_number = 40
    mode_fengliang = 3
    intensity_middle_point = 1
    jingdu = 801                               # image grid resolution (jingdu x jingdu)
    banfenkuang_int = int(jingdu * 0.33 / 10)
    # Sample coordinates and the target intensity at each of them.
    zuobiao, result_wanted = zuobiaoGet(middle_point=jingdu // 2,
                                        zhongxingdianqiangdu=intensity_middle_point,
                                        banfenkuang_int=banfenkuang_int)
    result_wanted_tensor = torch.from_numpy(result_wanted)
    result_wanted_tensor = result_wanted_tensor.double()
    result_wanted_tensor.requires_grad_(False)
    # Pre-sampled mode fields at the preset points, split into real/imag channels.
    point_real_img = read_Data(file_place='../mode_numpy//allpoint_40_3_27_complex.npy')
    point_real_img_tensor = torch.from_numpy(point_real_img)
    point_real_img_tensor.requires_grad_(False)
    mode_number, mode_fengliang, numberOfpoints, shibu_xubu = point_real_img_tensor.shape
    result_fushu_tensor = torch.zeros(size=(mode_fengliang, numberOfpoints, shibu_xubu), dtype=torch.float64)
    # Superpose the modes with the current coefficients, channel by channel:
    # (a + ib)(c + id) = (ac - bd) + i(ad + bc). The .clone() calls keep autograd
    # happy despite the repeated in-place index assignments.
    for a1 in range(mode_fengliang):
        for a2 in range(numberOfpoints):
            for a3 in range(mode_number):
                # Real part of the superposed field at this component/point.
                result_fushu_tensor[a1, a2, 0] = (result_fushu_tensor[a1, a2, 0]).clone() \
                    + (point_real_img_tensor[a3, a1, a2, 0]).clone() * k_coefficient_real_img_tensor[a3, 0] \
                    - (point_real_img_tensor[a3, a1, a2, 1]).clone() * k_coefficient_real_img_tensor[a3, 1]
                # Imaginary part.
                result_fushu_tensor[a1, a2, 1] = (result_fushu_tensor[a1, a2, 1]).clone() \
                    + (point_real_img_tensor[a3, a1, a2, 1]).clone() * k_coefficient_real_img_tensor[a3, 0] \
                    + (point_real_img_tensor[a3, a1, a2, 0]).clone() * k_coefficient_real_img_tensor[a3, 1]
    result_fushu_tensor = result_fushu_tensor.clone()
    # Reduce the superposed field to a per-point intensity (same computation as
    # trans_3_27_to_esum above). As written, only channel 0 (the real part) of
    # result_fushu_tensor enters the intensity.
    mode_temp01_tensor = torch.zeros(size=(mode_fengliang, numberOfpoints, 1), dtype=torch.float64)
    mode_temp_tensor = torch.zeros(size=(1, numberOfpoints), dtype=torch.float64)
    all_point = result_fushu_tensor
    mode_fengliang, number_of_points, shibu_xubu = all_point.shape
    for a1 in range(number_of_points):
        for a2 in range(mode_fengliang):
            mode_temp01_tensor[a2, a1, 0] = torch.sqrt(
                (mode_temp01_tensor[a2, a1, 0]).clone() ** 2 + (all_point[a2, a1, 0]).clone() ** 2
            )
    for a1 in range(number_of_points):
        for a3 in range(mode_fengliang):
            mode_temp_tensor[0, a1] = (mode_temp_tensor[0, a1]).clone() + (mode_temp01_tensor[a3, a1, 0]).clone() ** 2
    result_sum_tensor = mode_temp_tensor.view(number_of_points, 1)
    # Sum-of-squares mismatch between the achieved and the target intensities
    # (reduction='sum' replaces the deprecated reduce/size_average arguments).
    loss_fn = torch.nn.MSELoss(reduction='sum')
    distance = loss_fn(result_sum_tensor, result_wanted_tensor)
    print(distance)
    # Backpropagate to get d(distance)/d(k) for the coefficient tensor.
    distance.backward()
    k_gradient = k_coefficient_real_img_tensor.grad
    print('k_gradient is', k_gradient)
    print('want point ', result_wanted_tensor)
    print('real point ', result_sum_tensor)
    print()
    print('now dist is ', distance)
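    # Editorial note: the script stops after a single gradient evaluation. A
    # typical next step (not part of the original code) would be a plain
    # gradient-descent update with some step size lr, for example
    #   with torch.no_grad():
    #       k_coefficient_real_img_tensor -= lr * k_gradient
    #   k_coefficient_real_img_tensor.grad.zero_()
    # repeated until the distance stops decreasing.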