方法一:利用卷积神经网络(CNN)实现手写数字识别
import torch
from torchvision.datasets import MNIST
from torch import nn
from torch.utils.data import DataLoader
from torchvision import transforms
import matplotlib.pyplot as plt
import numpy as np
BATCH_SIZE = 150  # number of samples per mini-batch fed to the DataLoader
LEARNING_RATE = 0.005  # step size for the Adam optimizer
class DR(nn.Module):
    """CNN digit recognizer for MNIST: two conv+ReLU+maxpool stages, then a linear head.

    Spatial sizes: 28x28 -> conv(k=3, p=2) -> 30x30 -> pool/2 -> 15x15
                   -> conv(k=5, p=2) -> 15x15 -> pool/2 -> 7x7,
    so the flattened feature vector has 32*7*7 elements.
    """

    def __init__(self):
        super(DR, self).__init__()
        self.forwardSeq = nn.Sequential(
            nn.Conv2d(
                in_channels=1,        # grayscale input
                out_channels=16,      # 16 feature maps
                kernel_size=3,
                stride=1,
                padding=2,            # note: grows 28x28 to 30x30
                padding_mode="zeros", # pad with constant zeros
            ),
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=2),  # 30x30 -> 15x15
            nn.Conv2d(in_channels=16, out_channels=32, kernel_size=5, stride=1, padding=2),  # 15x15 -> 15x15
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=2),  # 15x15 -> 7x7 (floor division)
        )
        self.linear = nn.Linear(32 * 7 * 7, 10)  # 10 digit classes

    def forward(self, input):
        """Return class logits of shape (batch, 10) for input of shape (batch, 1, 28, 28)."""
        features = self.forwardSeq(input)
        # Flatten everything except the batch dimension.
        flat = features.view(input.size(0), -1)
        return self.linear(flat)
def training(epoch, model, criterion, optimaizer, loader):
    """Train `model` for `epoch` full passes over `loader`, logging every 25th batch.

    Returns a 5-tuple:
        rights  -- np.ndarray of sampled batch accuracies
        losses  -- np.ndarray of sampled batch losses
        maxAcc  -- {"idx": plot-position, "num": best accuracy seen}
        minLoss -- {"idx": plot-position, "num": lowest loss seen}
        length  -- len(rights) * 25, the x-axis extent for plotting
    """
    rights = []
    losses = []
    maxAcc = {"idx": 0, "num": 0}
    minLoss = {"idx": 0, "num": np.inf}
    for ep in range(epoch):
        print("epoch:", ep)
        for idx, (data, target) in enumerate(loader):
            out = model(data)
            loss = criterion(out, target)
            loss.backward()
            optimaizer.step()
            optimaizer.zero_grad()
            # Sample metrics only every 25 batches to keep the curves light.
            if idx % 25 != 0:
                continue
            pre = out.max(dim=-1)[1]  # predicted class = argmax over logits
            acc = pre.eq(target).float().mean()
            rights.append(acc.item())
            losses.append(loss.item())
            # Ties update the record, matching <= / >= comparisons.
            if maxAcc["num"] <= acc:
                maxAcc = {"idx": len(rights) * 25, "num": acc.item()}
            if minLoss["num"] >= loss:
                minLoss = {"idx": len(rights) * 25, "num": loss.item()}
            print("index({}) in epoch({}), loss is {}, acc is {}".format(idx, ep, loss, acc))
    return np.array(rights), np.array(losses), maxAcc, minLoss, len(rights) * 25
# --- Data, model, optimizer setup -----------------------------------------
sampleTR = MNIST(download=True, root="./data", transform=transforms.ToTensor(), train=True)   # training split
sampleTE = MNIST(download=True, root="./data", transform=transforms.ToTensor(), train=False)  # test split (unused below)
loader = DataLoader(dataset=sampleTR, batch_size=BATCH_SIZE, shuffle=True)
model = DR()
criterion = nn.CrossEntropyLoss()
optimaizer = torch.optim.Adam(model.parameters(), lr=LEARNING_RATE)
# BUG FIX: the original stored the returned tuple in `args` but the plotting
# code below referenced the individual names, raising NameError at runtime.
# Unpack the tuple explicitly instead.
rights, losses, maxAcc, minLoss, length = training(3, model, criterion, optimaizer, loader)
# --- Plot accuracy/loss curves, highlighting the best observed points ------
fig, ax = plt.subplots()
ax.plot(np.array([0, length]), np.array([1, 1]), c="red", label="one")  # y=1 reference line
ax.plot(np.linspace(0, length, rights.size), rights, label="trainAcc", c="blue")
ax.plot(np.linspace(0, length, losses.size), losses, label="trainLoss", c="green")
ax.scatter(np.array(maxAcc["idx"]), np.array(maxAcc["num"]), c="blue", s=30, label="maxRight[index:{} number:{}]".format(maxAcc["idx"], maxAcc["num"]))
ax.scatter(np.array(minLoss["idx"]), np.array(minLoss["num"]), c="green", s=30, label="minLoss[index:{} number:{}]".format(minLoss["idx"], minLoss["num"]))
ax.legend()
plt.yticks(np.linspace(0, losses.max(), 10))
plt.xticks(np.linspace(0, length, 20), rotation=50)
plt.xlabel("Unit:BATCH, BATCH_SIZE:{} Samples".format(BATCH_SIZE))
plt.show()

方法二:利用普通全连接神经网络实现:
import numpy as np
import torch
from torchvision.datasets import MNIST
import matplotlib.pyplot as plt
from torchvision import transforms
from torch import nn
from torch.utils.data import DataLoader
'''获取数据'''
# Load MNIST as tensors; images are single-channel 28x28.
mnist_tr = MNIST(root='./data', train=True, download=True, transform=transforms.ToTensor())  # 60,000 training samples
mnist_te = MNIST(root='./data', train=False, download=True, transform=transforms.ToTensor())  # 10,000 test samples
BatchSize = 256  # mini-batch size; train() below assumes full batches (see drop_last)
LearningRate = 0.005  # Adam learning rate
class DR(nn.Module):
    """Two-layer fully connected classifier for flattened 28x28 MNIST images."""

    def __init__(self):
        super(DR, self).__init__()
        self.linear_1 = nn.Linear(28 * 28, 28)  # hidden layer: 784 -> 28
        self.linear_2 = nn.Linear(28, 10)       # output layer: 28 -> 10 class scores

    def forward(self, input):
        """Return class logits of shape (batch, 10) for input of shape (batch, 784)."""
        out = self.linear_1(input)
        # FIX: the original wrote `nn.ReLU(inplace=True)(out)`, constructing a
        # fresh module on every forward call and discarding its return value,
        # working only via the in-place side effect. torch.relu produces
        # identical values without the throwaway module.
        out = torch.relu(out)
        return self.linear_2(out)
dr = DR()  # model instance used as a global by train()
criterion = nn.CrossEntropyLoss()  # loss function for 10-class classification
optimal = torch.optim.Adam(dr.parameters(), lr=LearningRate)  # optimizer
# drop_last=True guarantees every batch holds exactly BatchSize samples,
# which the hard-coded view(BatchSize, ...) in train() relies on.
loader = DataLoader(dataset=mnist_tr, shuffle=True, batch_size=BatchSize, drop_last=True)
def train(epoch):
    """Run `epoch` passes over the global `loader`, then plot loss/accuracy curves.

    Uses the module-level globals `dr`, `criterion`, `optimal`, `loader`.
    Metrics are sampled every 5 batches.
    """
    accs = []
    losses = []
    for i in range(epoch):
        print("epoch:{}-----------------------------------------\n".format(i))
        for index, (data, target) in enumerate(loader):
            # FIX: flatten using the actual batch dimension instead of the
            # hard-coded BatchSize constant, so the code no longer breaks
            # on a partial final batch (i.e. no longer needs drop_last=True).
            out = dr(data.view(data.size(0), -1))
            loss = criterion(out, target)
            loss.backward()
            optimal.step()
            optimal.zero_grad()
            if index % 5 == 0:
                pre = out.max(dim=-1)[1]  # predicted class = argmax over logits
                acc = pre.eq(target).float().mean()
                accs.append(acc.item())
                losses.append(loss.item())
                print("epoch: {}, index: {}, loss: {}, accuracy: {}".format(i, index, loss, acc))
    print("accs长度:", len(accs))
    # FIX: metrics are recorded every 5 batches, so the x-axis step is 5;
    # the original multiplied by 25 (copied from the CNN script), which
    # stretched the x-axis of both curves by a factor of 5.
    plt.plot(np.linspace(0, len(losses) * 5, len(losses)), np.array(losses), label="loss curve")
    plt.plot(np.linspace(0, len(accs) * 5, len(accs)), np.array(accs), label="acc curve")
    plt.legend()
    plt.show()
train(3)  # train the fully connected model for 3 epochs and show the plots

该代码示例展示了如何用 PyTorch 实现简单的 MNIST 手写数字识别:第一个脚本使用卷积神经网络(CNN),包括两个卷积层、最大池化层和全连接层;第二个脚本使用普通的两层全连接网络作为对比。两者均采用 Adam 优化器和交叉熵损失函数进行训练,并在训练过程中记录、绘制损失和准确率曲线。
1012

被折叠的 条评论
为什么被折叠?



