# ### 2.0 Quadratic (second-order) nonlinear fitting
# #### 2.1 Constructing the polynomial features
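# The idea: lift each input x in R^n to a quadratic feature vector containing all
# products x_i*x_j (i <= j) plus the linear terms x_i, i.e. n(n+1)/2 + n = n(n+3)/2
# features (65 for n = 10), then fit an ordinary linear layer on those features.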
# In[17]:
import torch
import torch.nn as nn

#expects a 1-D tensor: returns all quadratic terms x_i*x_j (i<=j) followed by the linear terms x_i
def factor_gen(tensor):
    factorList=[]
    for i in range(tensor.shape[0]):
        for j in range(i,tensor.shape[0]):
            factorList.append(torch.mul(tensor[i],tensor[j]).reshape(1,-1))
    for i in range(tensor.shape[0]):
        factorList.append(tensor[i].reshape(1,-1))
    return torch.cat(factorList,dim=1)
# seed=torch.randn(1,10)
# factor_gen(seed)
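# Sanity check of the feature count: n*(n+1)/2 quadratic terms plus n linear terms,
# i.e. n*(n+3)/2 features in one row (for n=10 that is 55+10=65):
n=10
assert factor_gen(torch.randn(n)).shape==(1,n*(n+1)//2+n)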
# In[18]:
t1=torch.tensor([1,2,3,4,5,6,7,8,9,10])
factor_gen(t1).shape
# In[19]:
factor_gen(x_tensor[0,:]).shape
# factor_gen(torch.unsqueeze(x_tensor[0,:],dim=0))
# In[20]:
tensorList=[]
for i in range(0,x_tensor.shape[0]):
    tensorList.append(factor_gen(x_tensor[i,:]))
featureMatrix=torch.stack(tensorList)        # shape [N, 1, 65]
featureMatrix=torch.squeeze(featureMatrix)   # shape [N, 65]
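# Equivalent one-step construction: factor_gen already returns a [1,65] row, so
# concatenating along dim=0 yields [N,65] directly (featureMatrix_alt is a name
# introduced here just for the comparison):
featureMatrix_alt=torch.cat([factor_gen(row) for row in x_tensor],dim=0)
assert torch.equal(featureMatrix_alt,featureMatrix)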
# In[21]:
featureMatrix.shape
# In[22]:
featureMatrix
# In[23]:
tensorList[1].shape
# In[54]:
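# LinearRegression comes from an earlier section; judging from the 'linear.weight'
# state_dict key used below, it is presumably a minimal wrapper along these lines
# (an assumption, kept commented out so as not to shadow the real definition):
# class LinearRegression(nn.Module):
#     def __init__(self,in_dim,out_dim):
#         super().__init__()
#         self.linear=nn.Linear(in_dim,out_dim)
#     def forward(self,x):
#         return self.linear(x)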
model_2=LinearRegression(65,2)
criterion=nn.MSELoss()
optimizer=torch.optim.Adam(model_2.parameters(),lr=0.01)
target=torch.tensor([[0.0],[0.0]])  # unused in this cell; redefined in the search cell below
num_epochs=50000
for epoch in range(num_epochs):
    # Forward pass and loss
    y_hat=model_2(featureMatrix)
    loss=criterion(y_hat,y_tensor)
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
    if (epoch+1) % 10000 == 0:
        print(f'epoch: {epoch+1}, loss = {loss.item():.4f}')
# In[25]:
y_hat=model_2(featureMatrix)
y_hat
# In[26]:
y_tensor
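# Quick fit check: the largest absolute residual over the training samples
(y_hat-y_tensor).abs().max()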
# In[55]:
weight=model_2.state_dict()['linear.weight']
# manual check of the fitted polynomial on sample 5 (note: omits the layer's bias term)
torch.matmul(weight,factor_gen(x_tensor[5,:]).reshape(-1,1))
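# To reproduce model_2's output exactly, the bias must be added back in -- assuming
# the nn.Linear inside LinearRegression was created with bias=True (the default):
bias=model_2.state_dict()['linear.bias']
torch.matmul(weight,factor_gen(x_tensor[5,:]).reshape(-1,1))+bias.reshape(-1,1)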
# In[56]:
weight
# #### 2.2 Inverse search: optimizing an input toward a target output
# In[60]:
bounds=[(100,66),(100,30),(48,20),(48,20),(48,20),(48,20),(45,20),(80,30),(65,0),(65,40)]  # (max, min) per input dimension
tScale=0.01
candidate=torch.rand(10,requires_grad=True)
factors=weight.detach()  # note: weights only -- any bias in model_2 is ignored by this search
target=torch.tensor([[-0.9],[-0.9]])
optimizer=torch.optim.Adam([candidate],lr=0.01)
criterion=torch.nn.MSELoss()
epoch_num=5000
for epoch in range(epoch_num):
    optimizer.zero_grad()
    # rebuild the polynomial features from the current candidate
    test_candidate=factor_gen(candidate).reshape(-1,1)
    result=torch.matmul(factors,test_candidate)
    loss=criterion(result,target)
    loss.backward()
    optimizer.step()
    # project each entry back into its allowed (scaled) range
    with torch.no_grad():
        for i,(max_val,min_val) in enumerate(bounds):
            candidate[i]=candidate[i].clamp(max=max_val*tScale,min=min_val*tScale)
    if (epoch+1)%1000==0:
        print(f"epoch:{epoch+1},loss:{loss.item()}")
# In[61]:
candidate*100  # undo tScale (=0.01) to recover the original units
# In[59]:
torch.matmul(factors,factor_gen(candidate).reshape(-1,1))  # should be close to target (up to the ignored bias)
# In[32]:
# caution: factor_gen assumes a 1-D tensor, so on the 2-D x_tensor the products are
# taken between whole rows, not within each sample
sample=factor_gen(x_tensor)
sample.shape
#torch.matmul(factors,x_tensor[])
# In[33]:
for param in model.parameters():
    print(param.grad)