优化器的使用
1.创建优化器
2.循环内梯度清零
3.计算梯度
4.根据梯度更新网络参数(optimizer.step(),从而使loss下降)
例子:
# CIFAR-10 data pipeline: PIL images converted to tensors via a Compose transform.
dataset_transform_compose = transforms.Compose([transforms.ToTensor()])
train_set = torchvision.datasets.CIFAR10(root="./dataset", train=True, transform=dataset_transform_compose, download=True)
test_set = torchvision.datasets.CIFAR10(root="./dataset", train=False, transform=dataset_transform_compose, download=True)
# NOTE(review): this loader is built from the *test* split but is consumed by the
# training loop below — presumably chosen because the test set is smaller and the
# example runs faster; confirm whether train_set was intended for real training.
dataloader = DataLoader(test_set, batch_size=64, shuffle=True)
class model(nn.Module):
    """Small CNN for CIFAR-10 classification.

    Three conv+maxpool stages reduce a (N, 3, 32, 32) batch to a
    (N, 64, 4, 4) feature map, which is flattened to 1024 features and
    mapped through two linear layers to (N, 10) class scores.
    """

    def __init__(self):
        super().__init__()
        # 32x32 input halved by three MaxPool2d(2) stages -> 4x4 spatial
        # with 64 channels, hence 64 * 4 * 4 = 1024 flattened features.
        self.model1 = nn.Sequential(
            nn.Conv2d(3, 32, kernel_size=(5, 5), stride=(1, 1), padding=2),
            nn.MaxPool2d(2),
            nn.Conv2d(32, 32, kernel_size=(5, 5), padding=2),
            nn.MaxPool2d(2),
            nn.Conv2d(32, 64, kernel_size=(5, 5), padding=2),
            nn.MaxPool2d(2),
            nn.Flatten(),
            nn.Linear(1024, 64),
            nn.Linear(64, 10),
        )

    def forward(self, input):
        """Run the batch through the sequential stack; returns (N, 10) scores."""
        return self.model1(input)
model1 = model()
bloss = nn.CrossEntropyLoss()
opti = torch.optim.Adam(model1.parameters(), lr=0.01)  # Step 1: create the optimizer.
for epoch in range(5):
    epoch_loss = 0.0
    for data in dataloader:
        imgs, targets = data
        output = model1(imgs)
        loss = bloss(output, targets)
        opti.zero_grad()  # Step 2: clear gradients left over from the previous batch.
        loss.backward()   # Step 3: compute gradients of the loss w.r.t. parameters.
        opti.step()       # Step 4: update parameters using the computed gradients.
        # BUG FIX: accumulate the Python float via .item(), not the tensor itself —
        # summing the loss tensor keeps every batch's autograd graph alive for the
        # whole epoch (that's why the original output shows grad_fn=<AddBackward0>),
        # wasting memory for no benefit.
        epoch_loss = epoch_loss + loss.item()
    print(epoch_loss)
输出:
tensor(1807.5908, grad_fn=<AddBackward0>)
tensor(276.8998, grad_fn=<AddBackward0>)
tensor(253.0088, grad_fn=<AddBackward0>)
tensor(241.0754, grad_fn=<AddBackward0>)
tensor(225.8897, grad_fn=<AddBackward0>)