torch实现一个简单的线性回归,方程为y=2x+10( y=wx+b), 回归的过程也就是求w和b.最后的演示可以看到模型训练完成后,w=1.9764 , b=10.4458,已经很接近真实的参数.
1.模型
使用torch, 建立线性方程的模型为:y=torch.matmul(x,self.weigth)+self.bias
import os
import sys
import math
import torch
import torch.nn as nn
class MyLayer(nn.Module):
    """A minimal learnable linear layer: y = x @ weight + bias.

    Args:
        input_f: number of input features (length of the weight vector).
        output_f: number of output features (length of the bias vector).
        bias: if False, the layer has no additive bias term.
    """

    def __init__(self, input_f, output_f, bias=True):
        super(MyLayer, self).__init__()
        self.input_f = input_f
        self.output_f = output_f
        # torch.Tensor(n) returns UNINITIALIZED memory (may contain NaN/inf),
        # which silently breaks training; initialize with random values instead.
        # Also fixes the original "weigth" typo.
        self.weight = nn.Parameter(torch.randn(input_f))
        if bias:
            # Bias must be sized per OUTPUT feature, not per input feature
            # (the original used input_f; identical only because both are 1).
            self.bias = nn.Parameter(torch.randn(output_f))
        else:
            # Honor the bias flag, which the original constructor ignored.
            self.register_parameter("bias", None)

    def forward(self, x):
        """Apply the affine map to x; x's last dim must equal input_f."""
        y = torch.matmul(x, self.weight)
        if self.bias is not None:
            y = y + self.bias
        # Removed the per-call debug print of the parameters; it flooded
        # stdout once per training step.
        return y
2. 模型训练
构造2000个数据对(x和y),其中y是根据x和线性方程计算得到,同时附加了一些噪声.这里容易出错的点是数据维度及数据格式一致性的问题.
import os
import sys
import math
import random
import torch
import torch.nn as nn
from utils.layer import MyLayer
# N: number of training samples; D_in/D_out: input/output feature dims.
N,D_in,D_out= 2000,1,1
class MyModel(nn.Module):
    """Regression model for the demo: a single MyLayer mapping D_in -> D_out."""

    def __init__(self):
        super(MyModel, self).__init__()
        # The whole model is just one custom linear layer.
        self.my_layer = MyLayer(D_in, D_out)

    def forward(self, x):
        return self.my_layer(x)
model = MyModel()
print(model)

# Build 2000 (x, y) pairs from y = 2x + 10 with additive uniform noise
# in [0, 1), so the fitted parameters should land near w=2, b=10.5.
x = [random.uniform(1, 10) for _ in range(2000)]
y_delt = [random.uniform(0, 1) for _ in range(2000)]
y = [2 * i + 10 + j for i, j in zip(x, y_delt)]
x = torch.FloatTensor(x)
y = torch.FloatTensor(y)

loss_fn = nn.MSELoss(reduction="mean")
learning_rate = 5e-2
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)

# One optimization step per sample, one pass over the data (SGD-style).
for t in range(N):
    # x[t] is already a 0-dim tensor; re-wrapping it in torch.FloatTensor()
    # (as the original did) is invalid. unsqueeze(0) produces the
    # (1,)-shaped input the linear layer expects.
    y_pred = model(x[t].unsqueeze(dim=0))
    # Unsqueeze the target too so prediction and target shapes match;
    # comparing (1,) against a 0-dim tensor triggers broadcasting warnings.
    loss = loss_fn(y_pred, y[t].unsqueeze(dim=0))
    print(f"step {t}: {loss}")
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
结果: