import torch
import torchvision.datasets
from torch import nn
from torch.nn import Conv2d, MaxPool2d, Flatten, Linear
from torch.utils.data import DataLoader
# CIFAR-10 test split used here as a small demo dataset; adjust root to your own
# location (download=True skips the download if the files are already present).
dataset = torchvision.datasets.CIFAR10(root=r'D:\PyCharm\CIFAR10', train=False,
                                       transform=torchvision.transforms.ToTensor(),
                                       download=True)
dataloader = DataLoader(dataset, batch_size=64)
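# Optional sanity check (not in the original script): the CIFAR-10 test split has
# 10,000 images, so batch_size=64 gives 157 batches (the last one smaller).
print(len(dataset), len(dataloader))  # 10000 157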
class MyModel(nn.Module):
    """Small CNN for CIFAR-10: three conv + max-pool stages followed by two linear layers."""

    def __init__(self):
        super(MyModel, self).__init__()
        self.model = nn.Sequential(
            Conv2d(3, 32, 5, padding=2),
            MaxPool2d(2),
            Conv2d(32, 32, 5, padding=2),
            MaxPool2d(2),
            Conv2d(32, 64, 5, padding=2),
            MaxPool2d(2),
            Flatten(),
            Linear(1024, 64),
            Linear(64, 10)
        )

    def forward(self, x):
        x = self.model(x)
        return x
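# Optional shape check (a sketch, not part of the original script): three 2x2
# poolings reduce 32x32 inputs to 4x4, so Flatten yields 64 * 4 * 4 = 1024
# features, matching Linear(1024, 64); a CIFAR-10-sized dummy batch maps to (64, 10).
check_model = MyModel()
dummy_input = torch.ones((64, 3, 32, 32))
print(check_model(dummy_input).shape)  # torch.Size([64, 10])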
# Variant 1: a single pass over the data, one optimisation step per batch.
model_1 = MyModel()
loss = nn.CrossEntropyLoss()
optimizer = torch.optim.SGD(model_1.parameters(), lr=0.01)
for data in dataloader:
    imgs, targets = data
    outputs_1 = model_1(imgs)
    result_loss_1 = loss(outputs_1, targets)
    optimizer.zero_grad()       # clear gradients from the previous batch
    result_loss_1.backward()    # backpropagate
    optimizer.step()            # update the parameters
    print(result_loss_1)
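# Note: each printed value above is a zero-dimensional tensor (shown with its
# grad_fn); result_loss_1.item() would give the plain Python float instead.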
# Variant 2: the same loop repeated for 20 epochs, accumulating the loss per epoch.
model_2 = MyModel()
loss = nn.CrossEntropyLoss()
optimizer = torch.optim.SGD(model_2.parameters(), lr=0.01)
for epoch in range(20):
    running_loss = 0.0
    for data in dataloader:
        imgs, targets = data
        outputs_2 = model_2(imgs)
        result_loss_2 = loss(outputs_2, targets)
        optimizer.zero_grad()
        result_loss_2.backward()
        optimizer.step()
        # .item() detaches the scalar so the running total does not keep tensors alive
        running_loss = running_loss + result_loss_2.item()
    print(running_loss)
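# Optional (not in the original script): the sum above grows with the number of
# batches, so the per-batch mean is easier to compare across runs; for the last
# epoch that is:
print(running_loss / len(dataloader))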
# Variant 3: as above, but with a StepLR scheduler that divides the learning rate
# by 10 every 5 epochs; scheduler.step() is called once per epoch, after the
# inner batch loop, as StepLR expects.
model_3 = MyModel()
loss = nn.CrossEntropyLoss()
optimizer = torch.optim.SGD(model_3.parameters(), lr=0.01)
scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=5, gamma=0.1)
for epoch in range(20):
    running_loss = 0.0
    for data in dataloader:
        imgs, targets = data
        outputs_3 = model_3(imgs)
        result_loss_3 = loss(outputs_3, targets)
        optimizer.zero_grad()
        result_loss_3.backward()
        optimizer.step()
        running_loss = running_loss + result_loss_3.item()
    scheduler.step()
    print(running_loss)
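# Optional (not in the original script): scheduler.get_last_lr() reports the
# learning rate the scheduler has most recently set; with step_size=5 and
# gamma=0.1 it has been divided by 10 four times over the 20 epochs.
print(scheduler.get_last_lr())  # roughly [1e-06]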