import torch
from torchvision import transforms #transforms for preprocessing the images
from torchvision import datasets
from torch.utils.data import DataLoader #DataLoader batches the dataset
import torch.nn.functional as F #functional API (relu, pooling, etc.)
import torch.optim as optim
import matplotlib.pyplot as plt
# MNIST digit classification with GoogLeNet-style Inception blocks..............................................#
#load the dataset and transform each image into a normalized Tensor
batch_size = 64
transform = transforms.Compose([transforms.ToTensor(),transforms.Normalize((0.1307, ),(0.3081, ))])
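# Note (illustrative, not part of the original lesson code): Normalize applies
# x_norm = (x - mean) / std per channel; 0.1307 and 0.3081 are the commonly quoted
# mean and std of the MNIST training pixels, so a white pixel (1.0) maps to
# (1.0 - 0.1307) / 0.3081 ≈ 2.82.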
#the MNIST training set contains 60,000 images
train_dataset = datasets.MNIST(root='../dataset/mnist/',train=True,download=True,transform=transform)
train_loader = DataLoader(train_dataset,shuffle=True,batch_size=batch_size )
#the MNIST test set contains 10,000 images
test_dataset = datasets.MNIST(root='../dataset/mnist/',train=False,download=True,transform=transform)
test_loader = DataLoader(test_dataset,shuffle=False,batch_size=batch_size )
class Inception(torch.nn.Module): #Inception block: input [b, in_channels, w, h] -> output [b, 88, w, h]
    def __init__(self,in_channels):
        super(Inception,self).__init__()
        self.branch_pool = torch.nn.Conv2d(in_channels=in_channels,out_channels=24,kernel_size=1)
        self.branch1x1 = torch.nn.Conv2d(in_channels=in_channels,out_channels=16,kernel_size=1)
        self.branch5x5_1 = torch.nn.Conv2d(in_channels=in_channels,out_channels=16,kernel_size=1)
        self.branch5x5_2 = torch.nn.Conv2d(in_channels=16,out_channels=24,kernel_size=5,padding=2)
        self.branch3x3_1 = torch.nn.Conv2d(in_channels=in_channels,out_channels=16,kernel_size=1)
        self.branch3x3_2 = torch.nn.Conv2d(in_channels=16, out_channels=24, kernel_size=3,padding=1)
        self.branch3x3_3 = torch.nn.Conv2d(in_channels=24, out_channels=24, kernel_size=3,padding=1)
    def forward(self, x):
        branch_pool = F.avg_pool2d(x,kernel_size=3,padding=1,stride=1)
        branch_pool = self.branch_pool(branch_pool) #>>[b,24,w,h]
        branch1x1 = self.branch1x1(x) #>>[b,16,w,h]
        branch5x5 = self.branch5x5_1(x)
        branch5x5 = self.branch5x5_2(branch5x5) #>>[b,24,w,h]
        branch3x3 = self.branch3x3_1(x)
        branch3x3 = self.branch3x3_2(branch3x3)
        branch3x3 = self.branch3x3_3(branch3x3) #>>[b,24,w,h]
        output = [branch1x1,branch5x5,branch3x3,branch_pool] #>>[b,16,w,h]+[b,24,w,h]+[b,24,w,h]+[b,24,w,h]
        return torch.cat(output,dim=1) # >>[b,88,w,h] only the channel dimension changes
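# Quick sanity check (added for illustration, not part of the original lesson code):
# the four branches contribute 16 + 24 + 24 + 24 = 88 output channels and every
# branch preserves the spatial size, so the block only changes the channel count.
assert Inception(in_channels=10)(torch.zeros(1, 10, 12, 12)).shape == (1, 88, 12, 12)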
#Design the model as a class inheriting from nn.Module..................................................................#
class Model(torch.nn.Module):
    def __init__(self):
        super(Model, self).__init__()
        self.conv1 = torch.nn.Conv2d(1,10,kernel_size=5)
        self.conv2 = torch.nn.Conv2d(88,20,kernel_size=5)
        self.inception1 = Inception(in_channels=10)
        self.inception2 = Inception(in_channels=20)
        self.mp = torch.nn.MaxPool2d(2)
        self.fc = torch.nn.Linear(1408,10)
    def forward(self,x):
        x_size = x.size(0) #x=[b,1,28,28] x_size = batch_size
        x = F.relu(self.mp(self.conv1(x))) #[b,1,28,28]>[b,10,24,24]>[b,10,12,12]
        x = self.inception1(x) #[b,88,12,12]
        x = F.relu((self.mp(self.conv2(x)))) #[b,88,12,12]>[b,20,8,8]>[b,20,4,4]
        x = self.inception2(x) #[b,88,4,4]
        x = x.view(x_size,-1) #[b,1408]
        x = self.fc(x) #[b,10]
        return x
model = Model() #instantiate the model
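# Shape walk-through (added for illustration, not part of the original lesson code):
# conv1 + pool: [b,1,28,28] -> [b,10,12,12]; inception1 -> [b,88,12,12];
# conv2 + pool: -> [b,20,4,4]; inception2 -> [b,88,4,4]; flattening gives
# 88 * 4 * 4 = 1408 features, which is why the final Linear layer is (1408, 10).
assert model(torch.zeros(2, 1, 28, 28)).shape == (2, 10)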
#Training on GPU........................................................................................................#
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
model.to(device) #move the model to the GPU when one is available
#Construct loss functions and optimizer.................Use Torch API...................................................#
criterion = torch.nn.CrossEntropyLoss() #the instructor used torch.nn.BCELoss(size_average=False) in an earlier lesson, but with that the loss was far too large here
optimizer = optim.SGD(model.parameters(),lr=0.01,momentum=0.5) #lr is the learning rate, momentum=0.5 adds momentum to plain SGD
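# Note (illustrative, not part of the original lesson code): CrossEntropyLoss applies
# log-softmax internally, so the model returns raw logits of shape [b, 10] and the
# targets are plain integer class indices of shape [b].
_example_loss = criterion(torch.randn(3, 10), torch.tensor([0, 7, 2]))  # scalar tensor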
#define one training epoch..........................................................................................................#
def train(epoch):
    running_loss = 0.0
    for batch_idx, data in enumerate(train_loader, 0): #enumerate yields (index, (inputs, target)) pairs; inputs is a 4-D mini-batch tensor, target a 1-D label tensor, and the index starts at 0
        inputs, target = data
        inputs, target = inputs.to(device), target.to(device) #send the inputs and targets to the GPU at every step
        optimizer.zero_grad()
        #forward+backward+update
        outputs = model(inputs)
        loss = criterion(outputs,target)
        loss.backward()
        optimizer.step()
        running_loss += loss.item() #use .item() to get a Python number; otherwise PyTorch would keep the computation graph alive
        if batch_idx % 300 == 299:
            print('[%d,%5d] loss:%.3f' %(epoch+1,batch_idx+1,running_loss/300))
            running_loss = 0.0
#lists used to plot accuracy against epoch
x_axis = []
y_axis = []
#define one evaluation pass over the test set..........................................................................................................#
def test():
    correct = 0
    total = 0
    with torch.no_grad():
        for data in test_loader:
            images, labels = data
            images, labels = images.to(device), labels.to(device) #send the images and labels to the GPU at every step
            outputs = model(images)
            _, predicted = torch.max(outputs.data, dim=1) #the index of the largest logit is the predicted class
            total = total + labels.size(0)
            correct = correct + (predicted == labels).sum().item()
    print('Accuracy on test set:%d %%' %(100*correct/total))
    y_axis.append(correct/total)
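# Note (illustrative, not part of the original lesson code): torch.max(t, dim=1)
# returns a (values, indices) pair; the indices are the predicted classes, e.g.
# torch.max(torch.tensor([[0.1, 2.0, 0.3]]), dim=1) gives (tensor([2.0]), tensor([1])).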
if __name__ == '__main__':
    for epoch in range(10):
        train(epoch)
        test()
        x_axis.append(epoch)
    #drawing.....................................................................................................#
    plt.figure(figsize=(7, 7), dpi=80) #create the figure
    plt.plot(x_axis, y_axis, color='b', linestyle='-') #plot accuracy per epoch as a solid line
    plt.xlabel('epoch') #x-axis label
    plt.ylabel('Accuracy rate') #y-axis label
    plt.legend(["accuracy"], title='Accuracy&epoch', loc='upper left', fontsize=15) #legend
    plt.show() #display the figure
# Source: 刘二大人《PyTorch深度学习实践》(PyTorch Deep Learning Practice), Lesson 11: Convolutional Neural Networks (Advanced)