import torch.optim
import torchvision
from torch import nn
from torch.nn import Conv2d, MaxPool2d, Flatten, Linear
from torch.utils.data import DataLoader
# CIFAR-10 test split, converted to tensors. download=False: the data is
# expected to already exist under ./torchvision_dataset.
to_tensor = torchvision.transforms.ToTensor()
dataset = torchvision.datasets.CIFAR10(
    './torchvision_dataset',
    train=False,
    download=False,
    transform=to_tensor,
)
# Batches of 64; the final incomplete batch is dropped.
dataloader = DataLoader(dataset, batch_size=64, drop_last=True)
class Zkl(nn.Module):
    """Small CNN for CIFAR-10 classification.

    Three conv+maxpool stages followed by two linear layers. Expects
    input of shape (N, 3, 32, 32); after three 2x2 poolings the feature
    map is 64 x 4 x 4 = 1024, which feeds the first Linear layer.
    Returns raw logits of shape (N, 10).
    """

    def __init__(self):
        super().__init__()
        # Fixed typo: was "modle1". Kernel size 5 with padding=2 keeps
        # spatial dims; each MaxPool2d(2) halves them (32 -> 16 -> 8 -> 4).
        self.model1 = nn.Sequential(
            Conv2d(3, 32, 5, padding=2),
            MaxPool2d(2),
            Conv2d(32, 32, 5, padding=2),
            MaxPool2d(2),
            Conv2d(32, 64, 5, padding=2),
            MaxPool2d(2),
            Flatten(),
            Linear(1024, 64),   # 1024 = 64 channels * 4 * 4 spatial
            Linear(64, 10),     # 10 CIFAR-10 classes
        )

    def forward(self, x):
        """Run the network; x is (N, 3, 32, 32), returns (N, 10) logits."""
        return self.model1(x)
# Instantiate the model.
zkl = Zkl()
# Cross-entropy loss for 10-class classification (takes logits + class indices).
loss = nn.CrossEntropyLoss()
# Plain SGD over all model parameters. NOTE(review): lr=0.1 is aggressive for
# this net; kept as-is to preserve behavior.
optimizer = torch.optim.SGD(zkl.parameters(), lr=0.1)

for epoch in range(20):
    running_loss = 0.0
    for data in dataloader:
        # Unpack the batch into input images and target labels.
        imgs, targets = data
        # Forward pass: model output are the class logits.
        output = zkl(imgs)
        # Loss between predicted logits and target labels.
        loss_cro = loss(output, targets)
        # Clear gradients from the previous step so they don't accumulate.
        optimizer.zero_grad()
        # Backpropagate to compute fresh gradients.
        loss_cro.backward()
        # Update parameters.
        optimizer.step()
        # .item() detaches the scalar — accumulating the tensor itself would
        # keep every batch's autograd graph alive for the whole epoch.
        running_loss += loss_cro.item()
    # Epoch-level loss; should shrink if training is working.
    print(running_loss)