MNIST implementation and network structure (handwritten digit recognition; the code runs as-is, and the dataset is downloaded automatically, so no manual preparation is needed)
from __future__ import print_function
import argparse
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torchvision import datasets, transforms
from torch.optim.lr_scheduler import StepLR
from torchsummary import summary
class Net(nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        self.conv1 = nn.Conv2d(1, 32, 3, 1)   # 2D conv: 1 input channel, 32 output channels, 3x3 kernel, stride 1, no padding
        self.conv2 = nn.Conv2d(32, 64, 3, 1)  # 2D conv: 32 input channels, 64 output channels, 3x3 kernel, stride 1, no padding
        self.dropout1 = nn.Dropout2d(0.25)    # channel-wise dropout applied after pooling, p=0.25
        self.dropout2 = nn.Dropout2d(0.5)     # dropout applied before the final layer, p=0.5
        self.fc1 = nn.Linear(9216, 128)       # fully connected: 64 * 12 * 12 = 9216 flattened features -> 128
        self.fc2 = nn.Linear(128, 10)         # 128 features -> 10 class scores
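    # Where 9216 comes from (arithmetic implied by the layers above, spelled out
    # here for clarity): with a 3x3 kernel, stride 1 and no padding, each conv
    # shrinks the spatial size by 2, and the 2x2 max pool in forward() halves it:
    #   28x28 -> conv1 -> 26x26 -> conv2 -> 24x24 -> max_pool2d(2) -> 12x12
    #   64 channels * 12 * 12 = 9216 features entering fc1.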
    def forward(self, x):
        x = self.conv1(x)          # (N, 1, 28, 28)  -> (N, 32, 26, 26), N is the batch size
        x = F.relu(x)
        x = self.conv2(x)          # (N, 32, 26, 26) -> (N, 64, 24, 24)
        x = F.relu(x)
        x = F.max_pool2d(x, 2)     # (N, 64, 24, 24) -> (N, 64, 12, 12)
        x = self.dropout1(x)
        x = torch.flatten(x, 1)    # (N, 64, 12, 12) -> (N, 9216)
        x = self.fc1(x)            # (N, 9216)       -> (N, 128)
        x = F.relu(x)
        x = self.dropout2(x)
        x = self.fc2(x)            # (N, 128)        -> (N, 10)
        output = F.log_softmax(x, dim=1)  # log-probabilities over the 10 digit classes
        return output
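# Illustrative sketch (not part of the original listing): the torchsummary import
# above can print the layer-by-layer structure and parameter counts of Net.
# Uncomment to try it; pass device="cpu" on a machine without a GPU.
# model = Net().to("cpu")
# summary(model, input_size=(1, 28, 28), device="cpu")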
def train(args, model, devic