import torch
from torchvision import datasets
from torchvision import transforms
from torch.utils.data import DataLoader
import torch.nn.functional as F
import torch.optim as optim
# Intended mini-batch size.
# NOTE(review): the DataLoaders below hard-code batch_size=32 and never
# read this constant — confirm which value is actually intended.
batch_size = 64

# Preprocessing pipeline shared by the train and test datasets:
# PIL image -> float tensor in [0, 1], then per-channel normalization.
# NOTE: the name "transoform" is a typo, but it is referenced elsewhere
# in this file, so it is kept for compatibility.
transoform = transforms.Compose([
    transforms.ToTensor(),
    # Normalize expects one mean/std entry per channel. The original wrote
    # (0) and (1), which are just the ints 0 and 1 — parentheses alone do
    # not make a tuple. Use proper one-element tuples (same behavior:
    # mean 0 / std 1 is a no-op; the usual MNIST stats are (0.1307,), (0.3081,)).
    transforms.Normalize((0,), (1,)),
])
# MNIST training split; downloaded into ../ on first run and preprocessed
# by the shared pipeline above.
train_data = datasets.MNIST(
    root="../",
    train=True,
    download=True,
    transform=transoform,  # (sic) module-level typo kept for compatibility
)

# Shuffled mini-batches of 32 samples for SGD.
train_loader = DataLoader(
    dataset=train_data,
    batch_size=32,
    shuffle=True,
)
# MNIST test split, with the same preprocessing as the training data.
test_data = datasets.MNIST(
    root="../",
    train=False,
    download=True,
    transform=transoform,
)

# Evaluation loader: fixed order (no shuffling needed for accuracy).
test_loader = DataLoader(
    dataset=test_data,
    batch_size=32,
    shuffle=False,
)
class Mo(torch.nn.Module):
    """Two-layer MLP classifier for flattened 28x28 MNIST digits.

    Architecture: 784 -> 12 (ReLU) -> 10 raw logits. No softmax is applied
    here; CrossEntropyLoss applies log-softmax internally.
    """

    def __init__(self):
        super(Mo, self).__init__()
        # 28*28 = 784 input pixels squeezed into a 12-unit hidden layer.
        self.l1 = torch.nn.Linear(784, 12)
        # One output logit per digit class (0-9).
        self.l2 = torch.nn.Linear(12, 10)

    def forward(self, x):
        # Flatten any incoming batch shape into (N, 784) rows.
        flat = x.view(-1, 784)
        hidden = F.relu(self.l1(flat))
        return self.l2(hidden)
# Model, loss, and optimizer shared by the train/test routines below.
model = Mo()
# CrossEntropyLoss consumes raw logits plus integer class targets.
criterion = torch.nn.CrossEntropyLoss()
# Plain SGD over every learnable parameter of the model.
optimizer = optim.SGD(model.parameters(), lr=0.1)
def train(epoch):
    """Run one full pass over train_loader, printing the loss every 100 batches."""
    for batch_idx, (inputs, targets) in enumerate(train_loader):
        # Clear gradients accumulated by the previous step before backward().
        optimizer.zero_grad()
        logits = model(inputs)
        loss = criterion(logits, targets)
        loss.backward()
        optimizer.step()
        if batch_idx % 100 == 0:
            print("epoch{},loss={}".format(epoch, loss.item()))
def test():
    """Evaluate classification accuracy over test_loader and print it."""
    correct, total = 0, 0
    # Inference only: no gradients needed.
    with torch.no_grad():
        # BUG FIX: the original iterated `enumerate(test_loader)`, so `data`
        # was a (batch_index, (x, y)) pair and `x, y = data` bound x to the
        # integer batch index — model(x) would then fail. Iterate the
        # loader directly, as the training loop does.
        for data in test_loader:
            x, y = data
            y_pred = model(x)
            # Predicted class = index of the max logit along the class dim.
            _, predict = torch.max(y_pred, dim=1)
            total += y.size(0)
            correct += (predict == y).sum().item()
    print("test acc{}".format(correct/total))
# Train for 1000 epochs, evaluating on the test set every 100th epoch
# (including epoch 0, before any substantial training has happened).
for epoch in range(1000):
    train(epoch)
    if epoch % 100 == 0:
        test()
# 09. Softmax Classifier
# (blog residue from the scraped page) Latest recommended article published 2023-08-29 17:23:15