Understanding torch.reshape()

What it does
torch.reshape changes the shape of a tensor: it returns a tensor with the same elements as the input, arranged in the requested shape. Note that both the input and the result are tensors.
Code


>>> c=torch.randn((2,5))
>>> print(c)
tensor([[ 1.0559, -0.3533,  0.5194,  0.9526, -0.2483],
        [-0.1293,  0.4809, -0.5268, -0.3673,  0.0666]])
>>> d=torch.reshape(c,(5,2))
>>> print(d)
tensor([[ 1.0559, -0.3533],
        [ 0.5194,  0.9526],
        [-0.2483, -0.1293],
        [ 0.4809, -0.5268],
        [-0.3673,  0.0666]])

As the output shows, reshape fills the new shape in row-major order: elements are read from the original tensor row by row and written into the new shape row by row.
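This row-major behavior is easy to verify. Below is a minimal sketch (the tensors here are illustrative, not from the example above) showing that reshape is equivalent to flattening row by row and refilling row by row, and that one dimension may be given as -1 so PyTorch infers it from the total element count:

import torch

c = torch.arange(10).reshape(2, 5)   # [[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]]
d = torch.reshape(c, (5, 2))         # same 10 elements, new shape
e = torch.reshape(c, (5, -1))        # the -1 is inferred as 2

# Reshaping equals flattening in row-major order and refilling row by row.
assert torch.equal(d, c.flatten().reshape(5, 2))
assert torch.equal(d, e)
print(d)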

Detailed explanation: the longer example below is a notebook export that arrived flattened onto a single line; it is reflowed here into its original cells, with the Chinese comments translated. It leans on reshape(1, -1) and reshape(-1, 1) throughout to build polynomial features and fit them. The names torch, nn, x_tensor, y_tensor, LinearRegression, and model are imported or defined in earlier notebook cells that are not shown.

# ### 2.0 Quadratic (nonlinear) fitting
# #### 2.1 Constructing the polynomial terms

# In[17]:
# Assumes a 1-D tensor: collects every pairwise product x_i * x_j (i <= j)
# plus the linear terms x_i, each reshaped to a (1, 1) row, and
# concatenates them into a single row vector of features.
def factor_gen(tensor):
    factorList = []
    for i in range(tensor.shape[0]):
        for j in range(i, tensor.shape[0]):
            factorList.append(torch.mul(tensor[i], tensor[j]).reshape(1, -1))
    for i in range(tensor.shape[0]):
        factorList.append(tensor[i].reshape(1, -1))
    return torch.cat(factorList, dim=1)

# seed = torch.randn(1, 10)
# factor_gen(seed)

# In[18]:
t1 = torch.tensor([1, 2, 3, 4, 5, 6, 7, 8, 9, 10])
factor_gen(t1).shape

# In[19]:
factor_gen(x_tensor[0, :]).shape
# factor_gen(torch.unsqueeze(x_tensor[0, :], dim=0))

# In[20]:
tensorList = []
for i in range(0, x_tensor.shape[0]):
    tensorList.append(factor_gen(x_tensor[i, :]))
featureMatrix = torch.stack(tensorList)
featureMatrix = torch.squeeze(featureMatrix)

# In[21]:
featureMatrix.shape

# In[22]:
featureMatrix

# In[23]:
tensorList[1].shape

# In[54]:
model_2 = LinearRegression(65, 2)
criterion = nn.MSELoss()
optimizer = torch.optim.Adam(model_2.parameters(), lr=0.01)
target = torch.tensor([[0.0], [0.0]])
num_epochs = 50000
for epoch in range(num_epochs):
    # Forward pass and loss
    y_hat = model_2(featureMatrix)
    loss = criterion(y_hat, y_tensor)
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
    if (epoch + 1) % 10000 == 0:
        print(f'epoch: {epoch+1}, loss = {loss.item():.4f}')

# In[25]:
y_hat = model_2(featureMatrix)
y_hat

# In[26]:
y_tensor

# In[55]:
weight = model_2.state_dict()['linear.weight']
torch.matmul(weight, factor_gen(x_tensor[5, :]).reshape(-1, 1))

# In[56]:
weight

# #### 2.x test line

# In[60]:
bounds = [(100, 66), (100, 30), (48, 20), (48, 20), (48, 20),
          (48, 20), (45, 20), (80, 30), (65, 0), (65, 40)]
tScale = 0.01
# test_candidate = torch.randn(65, 1, requires_grad=True)
candidate = torch.rand(10, requires_grad=True)
factors = weight.detach()
target = torch.tensor([[-0.9], [-0.9]])
optimizer = torch.optim.Adam([candidate], lr=0.01)
criterion = torch.nn.MSELoss()
epoch_num = 5000
for epoch in range(epoch_num):
    optimizer.zero_grad()
    test_candidate = factor_gen(candidate)
    test_candidate = test_candidate.reshape(-1, 1)
    result = torch.matmul(factors, test_candidate)
    loss = criterion(result, target)
    loss.backward(retain_graph=True)
    optimizer.step()
    with torch.no_grad():
        # An earlier version only clipped negative entries to 0:
        # for i in range(candidate.shape[0]):
        #     if candidate[i] < 0:
        #         candidate[i] = 0
        # Now each entry is clamped into its scaled (max, min) bound.
        for i, (max_val, min_val) in enumerate(bounds):
            candidate.data[i] = torch.clamp(candidate.data[i],
                                            max=max_val * tScale,
                                            min=min_val * tScale)
    if (epoch + 1) % 1000 == 0:
        print(f"epoch:{epoch+1},loss:{loss.item()}")

# In[61]:
candidate * 100

# In[59]:
torch.matmul(factors, factor_gen(candidate).reshape(-1, 1))

# In[32]:
sample = factor_gen(x_tensor)
sample.shape
# torch.matmul(factors, x_tensor[])

# In[33]:
for param in model.parameters():
    print(param.grad)
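For reference, here is a minimal self-contained check of the notebook's feature count (the random input data is illustrative): a length-n vector yields n(n+1)/2 pairwise products plus n linear terms, i.e. 55 + 10 = 65 features for n = 10, which is why the model above is LinearRegression(65, 2).

import torch

# Same factor_gen as above, repeated so this sketch runs on its own.
def factor_gen(tensor):
    factorList = []
    for i in range(tensor.shape[0]):
        for j in range(i, tensor.shape[0]):
            factorList.append(torch.mul(tensor[i], tensor[j]).reshape(1, -1))
    for i in range(tensor.shape[0]):
        factorList.append(tensor[i].reshape(1, -1))
    return torch.cat(factorList, dim=1)

n = 10
x = torch.randn(n)                    # illustrative random input
features = factor_gen(x)
# n*(n+1)/2 pairwise products + n linear terms = 65 features
assert features.shape == (1, n * (n + 1) // 2 + n)
print(features.shape)                 # torch.Size([1, 65])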