具体代码
import torch
import torch.nn as nn
from torch.optim import SGD
# Run on the GPU when one is available, otherwise on the CPU.
use_cuda = torch.cuda.is_available()
device = torch.device("cuda" if use_cuda else "cpu")

# Synthetic training set: 500 inputs drawn uniformly from [0, 1),
# targets produced by the ground-truth line y = 6x + 2.5.
x = torch.rand(500, 1, device=device)
y_true = x * 6 + 2.5
class MyLinear(nn.Module):
    """Single-feature linear regression model: y = w * x + b."""

    def __init__(self):
        super().__init__()
        # One input feature mapped to one output feature.
        self.linear = nn.Linear(1, 1)

    def forward(self, x):
        """Apply the learned affine map to a (batch, 1) input tensor."""
        return self.linear(x)
# Model, optimizer, and loss for fitting the synthetic line.
model = MyLinear().to(device)
optimizer = SGD(model.parameters(), 0.001)
loss_fn = nn.MSELoss()

EPOCHS = 50000      # total gradient-descent iterations
LOG_EVERY = 100     # print progress once per this many epochs

for epoch in range(EPOCHS):
    optimizer.zero_grad()                 # clear gradients from the previous step
    loss = loss_fn(model(x), y_true)      # forward pass + MSE against the targets
    loss.backward()                       # backpropagate
    optimizer.step()                      # SGD parameter update
    if epoch % LOG_EVERY == 0:
        # Report current loss alongside the learned weight and bias
        # (both are 1x1 / scalar, so .item() is valid).
        params = list(model.parameters())
        print(f'loss:{loss.item()} {params[0].item()} {params[1].item()}')
PyTorch线性回归实战
本文通过使用PyTorch实现线性回归模型,详细介绍了如何定义模型结构、设置损失函数及优化器,并通过迭代训练调整参数以逼近真实数据分布。
881

被折叠的评论
为什么被折叠?



