import torch
import torchvision
from torch import nn
from torch.nn import Sequential
from torch.utils.tensorboard import SummaryWriter
from torchvision import transforms
from torch.utils.data import DataLoader
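# Use the GPU if one is available, otherwise fall back to the CPU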
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print("Using device",device)
train_data=torchvision.datasets.CIFAR10(root='./data', train=True, transform=transforms.ToTensor(), download=True)
test_data=torchvision.datasets.CIFAR10(root='./data', train=False, transform=transforms.ToTensor(),download=True)
train_data_size=len(train_data)
test_data_size=len(test_data)
print("训练集长度:{}".format(train_data_size))
print("测试集长度:{}".format(test_data_size))
train_dataloader = DataLoader(train_data, batch_size=64, shuffle=True)
test_dataloader = DataLoader(test_data, batch_size=64)
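# A small CNN for CIFAR-10: three conv/max-pool blocks followed by two linear layers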
class Net(nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        # Input: 3x32x32 CIFAR-10 image; output: logits over 10 classes
        self.model1 = Sequential(
            nn.Conv2d(3, 32, 5, padding=2),   # -> 32 x 32 x 32
            nn.MaxPool2d(2),                  # -> 32 x 16 x 16
            nn.Conv2d(32, 32, 5, padding=2),  # -> 32 x 16 x 16
            nn.MaxPool2d(2),                  # -> 32 x 8 x 8
            nn.Conv2d(32, 64, 5, padding=2),  # -> 64 x 8 x 8
            nn.MaxPool2d(2),                  # -> 64 x 4 x 4
            nn.Flatten(),                     # -> 1024
            nn.Linear(1024, 64),
            nn.Linear(64, 10)
        )

    def forward(self, x):
        x = self.model1(x)
        return x
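# Instantiate the model, the cross-entropy loss, and a plain SGD optimizer on the chosen device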
net=Net().to(device)
loss_fn=nn.CrossEntropyLoss().to(device)
optimizer=torch.optim.SGD(net.parameters(), lr=0.01)
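# Number of training epochs and global step counters used for logging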
epoch=10
total_train_step=0
total_test_step=0
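# TensorBoard writer; inspect the curves with `tensorboard --logdir logs`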
writer=SummaryWriter(log_dir='logs')
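# Each epoch: one training pass over the train set, then one evaluation pass over the test set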
for i in range(epoch):
print("-----epoch:{}----".format(i))
net.train()
train_total_loss=0
train_total_acc=0
for data in train_dataloader:
imgs, labels = data
imgs=imgs.to(device)
labels=labels.to(device)
output=net(imgs)
loss = loss_fn(output, labels)
optimizer.zero_grad()
loss.backward()
optimizer.step()
total_train_step += 1
train_total_loss+=loss.item()
accuracy_cnt=(output.argmax(1)==labels).sum().item()
train_total_acc+=accuracy_cnt
print("train_accuracy:{:.2f}%, train_loss:{:.4f}".format((train_total_acc/train_data_size)*100,train_total_loss/train_data_size))
writer.add_scalar('train_loss', train_total_loss/train_data_size, total_train_step)
    # Evaluation phase: no gradients needed
    net.eval()
    total_test_loss = 0
    total_accuracy = 0
    with torch.no_grad():
        for data in test_dataloader:
            imgs, labels = data
            imgs = imgs.to(device)
            labels = labels.to(device)
            output = net(imgs)
            loss = loss_fn(output, labels)
            total_test_loss += loss.item()
            accuracy_res = (output.argmax(1) == labels).sum().item()
            total_accuracy += accuracy_res
    print("test_accuracy:{:.2f}%, test_loss:{:.4f}".format(
        (total_accuracy / test_data_size) * 100, total_test_loss / len(test_dataloader)))
    writer.add_scalar('test_loss', total_test_loss / len(test_dataloader), total_test_step)
    writer.add_scalar('test_accuracy', total_accuracy / test_data_size, total_test_step)
    total_test_step += 1
writer.close()