PyTorch 深度学习实践 (PyTorch Deep Learning Practice), Lecture 5: Exercises
Complete solutions for this lecture's exercises are hard to find, so I am uploading one for reference.
Bilibili: 刘二大人
Note: when comparing different optimizers, the initial parameter values must be kept identical across runs, otherwise the comparison is meaningless.
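Besides the manual init_weights reset used in the script below, a common alternative is to snapshot the freshly initialized state_dict once and reload it before each run. A minimal sketch, where demo_model and the optimizer list are illustrative stand-ins:

import copy
import torch

demo_model = torch.nn.Linear(1, 1)                   # stand-in for the LinearModel defined below
init_state = copy.deepcopy(demo_model.state_dict())  # snapshot taken once, right after construction

for optim_name in ["SGD", "Adam"]:
    demo_model.load_state_dict(init_state)           # every run starts from identical weights
    optimizer = getattr(torch.optim, optim_name)(demo_model.parameters(), lr=0.01)
    # ... train as in the script below ...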
# Exercise 5-1: train the linear model with different optimizers and save the plots
"""
Adagrad
Adam
Adamax"
ASGD
LBFGS
RMSprop
Rprop
SGD
"""
import torch
import matplotlib.pyplot as plt
x_data = torch.Tensor([[1.0], [2.0], [3.0]])
y_data = torch.Tensor([[2.0], [4.0], [6.0]])
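# The training data lie exactly on y = 2x, so a well-trained model should predict about 8.0 for x = 4.0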
class LinearModel(torch.nn.Module):
    def __init__(self) -> None:
        super().__init__()
        self.linear = torch.nn.Linear(1, 1)
        # nn.Linear parameters require gradients by default, so no explicit requires_grad is needed

    def forward(self, x):
        return self.linear(x)

def init_weights(m):
    # Reset the parameters to a fixed starting point so every optimizer run is comparable
    m.linear.weight.data.fill_(1.0)
    m.linear.bias.data.fill_(0.0)
model = LinearModel()
criterion = torch.nn.MSELoss(reduction='sum')
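# Note: with reduction='sum' the loss and its gradients scale with the number of samples,
# so the learning rate effectively absorbs the dataset size; with reduction='mean' you
# would typically need a proportionally larger lr to take steps of the same size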
def optim_select(name: str):
    name_list = [
        "Adagrad", "Adam", "Adamax", "ASGD", "LBFGS", "RMSprop", "Rprop", "SGD"
    ]
    if name == "LBFGS":
        # LBFGS takes much larger steps per iteration, hence the smaller learning rate
        return torch.optim.LBFGS(model.parameters(), lr=0.001)
    if name in name_list:
        # getattr is a safer way than eval to look up the optimizer class by name
        return getattr(torch.optim, name)(model.parameters(), lr=0.01)
    raise NameError(f"{name} is not a supported optimizer")
plt.figure(num='fig1')
for optim_name in ["Adagrad", "Adam"]:
    print(f"Current optimizer: {optim_name}")
    init_weights(model)  # identical starting parameters for every optimizer
    optimizer = optim_select(optim_name)
    l_list = []
    for epoch in range(1000):
        y_pred = model(x_data)
        loss = criterion(y_pred, y_data)
        l_list.append(loss.item())
        if epoch % 200 == 0:
            print(f"{epoch} -- {loss.item():.3f}")
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
    x_test = torch.Tensor([[4.0]])
    y_test = model(x_test)
    print("y_pred =", y_test.item())
    plt.plot(range(epoch + 1), l_list, label=optim_name)
plt.xlabel("epoch")
plt.ylabel("loss")
plt.title("epoch-loss")
plt.legend()
plt.show()
plt.figure(num='fig2')
for optim_name in ["Adamax", "ASGD", "RMSprop", "Rprop", "SGD"]:
    print(f"Current optimizer: {optim_name}")
    init_weights(model)
    optimizer = optim_select(optim_name)
    l_list = []
    for epoch in range(100):
        y_pred = model(x_data)
        loss = criterion(y_pred, y_data)
        l_list.append(loss.item())
        if epoch % 20 == 0:
            print(f"{epoch} -- {loss.item():.3f}")
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
    x_test = torch.Tensor([[4.0]])
    y_test = model(x_test)
    print("y_pred =", y_test.item())
    plt.plot(range(epoch + 1), l_list, label=optim_name)
plt.xlabel("epoch")
plt.ylabel("loss")
plt.title("epoch-loss")
plt.legend()
plt.show()
def closure():
    # LBFGS re-evaluates the objective several times per step (e.g. during its line
    # search), so it must be given a closure that recomputes the loss and gradients
    optimizer.zero_grad()
    y_pred = model(x_data)
    loss = criterion(y_pred, y_data)
    loss.backward()
    return loss

l_list = []
plt.figure(num='fig3')
for optim_name in ["LBFGS"]:
    print(f"Current optimizer: {optim_name}")
    init_weights(model)
    optimizer = optim_select(optim_name)
    for epoch in range(1000):
        # step(closure) runs the closure itself and returns the resulting loss
        loss = optimizer.step(closure)
        l_list.append(loss.item())
        if epoch % 100 == 0:
            print(f"{epoch} -- {loss.item():.3f}")
    x_test = torch.Tensor([[4.0]])
    y_test = model(x_test)
    print("y_pred =", y_test.item())
    plt.plot(range(epoch + 1), l_list, label=optim_name)
plt.xlabel("epoch")
plt.ylabel("loss")
plt.title("epoch-loss")
plt.legend()
plt.text(600, 8, f'training epochs: {epoch + 1}\nlearning rate: 0.001')
plt.show()
For some reason the images would not upload; you can copy the code and run it locally to see the results.
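The exercise also asks to save the figures, while the script only displays them. One minimal way to do that, with assumed output filenames, is to call plt.savefig right before each plt.show():

plt.savefig("fig1.png", dpi=150, bbox_inches="tight")  # the filename is just an example
plt.show()  # save before show(), since some backends clear the canvas once the window is closed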