batch_size = 10

for X, y in data_iter(batch_size, features, labels):
    print(X, '\n', y)
    break
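data_iter here is the minibatch generator defined earlier in the from-scratch section. If it is not in scope, a minimal sketch that shuffles indices and yields minibatches (the exact original body may differ) looks like:

import random

def data_iter(batch_size, features, labels):
    """Yield shuffled minibatches of (features, labels)."""
    num_examples = len(features)
    indices = list(range(num_examples))
    random.shuffle(indices)  # read examples in random order
    for i in range(0, num_examples, batch_size):
        batch_indices = torch.tensor(
            indices[i:min(i + batch_size, num_examples)])
        yield features[batch_indices], labels[batch_indices]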
# Initialize model parameters
w = torch.normal(0, 0.01, size=(2, 1), requires_grad=True)
b = torch.zeros(1, requires_grad=True)

# Define the model
def linreg(X, w, b):
    return torch.matmul(X, w) + b
# Define the loss function
def squared_loss(y_hat, y):
    """Squared loss (halved squared error; no mean taken here)."""
    # reshape y to match y_hat to avoid unintended broadcasting
    return (y_hat - y.reshape(y_hat.shape)) ** 2 / 2

# Define the optimization algorithm
def sgd(params, lr, batch_size):
    """Minibatch stochastic gradient descent."""
    with torch.no_grad():
        for param in params:
            # the loss is summed over the minibatch, so dividing by
            # batch_size applies the mean per-example gradient
            param -= lr * param.grad / batch_size
            param.grad.zero_()
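As a quick sanity check of the update rule (a hypothetical toy example, not part of the original): since the training loop backpropagates a summed minibatch loss, sgd's division by batch_size makes the step equal to lr times the mean per-example gradient.

p = torch.tensor([1.0, 2.0], requires_grad=True)
(p ** 2).sum().backward()        # p.grad == 2 * p == [2., 4.]
sgd([p], lr=0.1, batch_size=2)   # p -= 0.1 * p.grad / 2
print(p)                         # tensor([0.9000, 1.8000], requires_grad=True)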
# Training
lr = 0.03
num_epochs = 5
net = linreg
loss = squared_loss
for epoch in range(num_epochs):
    for X, y in data_iter(batch_size, features, labels):
        l = loss(net(X, w, b), y)
        l.sum().backward()  # gradients of the summed minibatch loss
        sgd([w, b], lr, batch_size)
    with torch.no_grad():  # evaluate on the full dataset without tracking gradients
        train_l = loss(net(features, w, b), labels)
        print(f'epoch {epoch + 1}, loss {float(train_l.mean()):f}')
"""
epoch 1, loss 0.029100
epoch 2, loss 0.000103
epoch 3, loss 0.000050
epoch 4, loss 0.000050
epoch 5, loss 0.000050
"""
Concise Implementation of Linear Regression
import numpy as np
import torch
import random
from torch.utils import data
from d2l import torch as d2l
# Generate the dataset
def synthetic_data(w, b, num_examples):
    """Generate y = Xw + b + noise."""
    X = torch.normal(0, 1, (num_examples, len(w)))
    y = torch.matmul(X, w) + b
    y += torch.normal(0, 0.01, y.shape)
    return X, y.reshape((-1, 1))
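This local synthetic_data mirrors the d2l.synthetic_data used below; a quick shape check with hypothetical values:

X, y = synthetic_data(torch.tensor([2.0, -3.4]), 4.2, 5)
print(X.shape, y.shape)  # torch.Size([5, 2]) torch.Size([5, 1])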
true_w = torch.tensor([2, -3.4])
true_b = 4.2
features, labels = d2l.synthetic_data(true_w, true_b, 1000)

# Read the dataset
def load_array(data_arrays, batch_size, is_train=True):
    """Construct a PyTorch data iterator."""
    dataset = data.TensorDataset(*data_arrays)
    return data.DataLoader(dataset, batch_size, shuffle=is_train)
batch_size = 10
data_iter = load_array((features, labels), batch_size)
next(iter(data_iter))
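next(iter(data_iter)) returns a list [X, y] (the DataLoader's default collation); with batch_size=10 and two features, the shapes are:

X, y = next(iter(data_iter))
print(X.shape, y.shape)  # torch.Size([10, 2]) torch.Size([10, 1])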
# Define the model
from torch import nn

"""In PyTorch, the fully connected layer is defined in the Linear class."""
net = nn.Sequential(nn.Linear(2, 1))
"""
We pass two arguments to nn.Linear. The first specifies the input feature
dimension, i.e. 2; the second specifies the output feature dimension, which
is a single scalar and therefore 1.
"""
# Initialize model parameters
net[0].weight.data.normal_(0, 0.01)
net[0].bias.data.fill_(0)

# Define the loss function
loss = nn.MSELoss()  # reduction='mean' by default, so it already averages over the minibatch

# Define the optimization algorithm
trainer = torch.optim.SGD(net.parameters(), lr=0.03)
# Training
num_epochs = 5

for epoch in range(num_epochs):
    for X, y in data_iter:
        l = loss(net(X), y)
        trainer.zero_grad()  # clear accumulated gradients
        l.backward()
        trainer.step()       # update parameters
    l = loss(net(features), labels)
    print(f'epoch {epoch + 1}, loss {l:f}')
"""
epoch 1, loss 0.000259
epoch 2, loss 0.000103
epoch 3, loss 0.000103
epoch 4, loss 0.000104
epoch 5, loss 0.000103
"""