Commonly imported modules
import torch as t
import torch.nn as nn
from torch.autograd import Variable as V
import torch.utils.data as Data
import torch.optim as optim
import torch.nn.functional as F
import torchvision
Quickly building a network
Method 1:
class Net(nn.Module):
    def __init__(self, n_feature, n_hidden, n_output):
        super(Net, self).__init__()
        self.hidden = nn.Linear(n_feature, n_hidden)
        self.predict = nn.Linear(n_hidden, n_output)

    def forward(self, x):
        x = F.relu(self.hidden(x))
        x = self.predict(x)
        return x
net1 = Net(1, 10, 1)
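A quick sanity check, not in the original: feed a dummy batch through net1 and confirm the output shape (the input here is purely illustrative).
x = V(t.randn(4, 1))     # 4 samples, 1 feature each -- illustrative input
print(net1(x).size())    # torch.Size([4, 1])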
Method 2:
net2 = nn.Sequential(
    nn.Linear(1, 10),
    nn.ReLU(),
    nn.Linear(10, 1)
)
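One practical difference between the two methods: net1's layers carry the attribute names hidden and predict, while Sequential numbers its submodules automatically. Printing the model shows this (the exact repr text varies by PyTorch version):
print(net2)
# Sequential(
#   (0): Linear(in_features=1, out_features=10, bias=True)
#   (1): ReLU()
#   (2): Linear(in_features=10, out_features=1, bias=True)
# )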
Method 3 (combining 1 and 2):
class CNN(nn.Module):
    def __init__(self):
        super().__init__()
        self.conv1 = nn.Sequential(
            nn.Conv2d(1, 16, 5, 1, 2),  # arguments: in_channels, out_channels, kernel_size, stride, padding; with stride=1, padding=(kernel_size-1)/2 keeps the image size unchanged
            nn.BatchNorm2d(16),
            nn.ReLU(),
            nn.MaxPool2d(2)
        )
        self.conv2 = nn.Sequential(
            nn.Conv2d(16, 32, 5, 1, 2),
            nn.BatchNorm2d(32),
            nn.ReLU(),
            nn.MaxPool2d(2)
        )
        self.linear = nn.Linear(32 * 7 * 7, 10)

    def forward(self, x):
        x = self.conv1(x)
        x = self.conv2(x)
        x = x.view(x.size(0), -1)  # flatten to (batch, 32*7*7)
        x = self.linear(x)
        return x
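Where does 32*7*7 come from? Each MaxPool2d(2) halves the 28x28 input (28 -> 14 -> 7) and conv2 outputs 32 channels. A quick check with a dummy batch (illustrative, not from the original):
cnn = CNN()
dummy = V(t.randn(2, 1, 28, 28))    # two fake MNIST-shaped images
feat = cnn.conv2(cnn.conv1(dummy))
print(feat.size())                  # torch.Size([2, 32, 7, 7])
print(cnn(dummy).size())            # torch.Size([2, 10])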
Note that the ReLU activation above comes from two different modules: torch.nn.ReLU is a layer (usable inside Sequential), while torch.nn.functional.relu is a plain function (called inside forward).
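Both forms compute the same function; a minimal demonstration:
x = V(t.randn(3))
print(F.relu(x))       # functional form, called inside forward()
print(nn.ReLU()(x))    # module form, usable inside Sequential -- same values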
Loading a dataset
Loading the MNIST dataset:
train_data = torchvision.datasets.MNIST(
    root='./MNIST',                                # directory to store the data
    download=True,                                 # download if not already present
    train=True,                                    # use the training split
    transform=torchvision.transforms.ToTensor()    # convert PIL.Image or numpy.array to a Tensor
)
The dataset is now loaded as Tensors and can be inspected manually:
import matplotlib.pyplot as plt

print(train_data.train_data.size())     # torch.Size([60000, 28, 28])
print(train_data.train_labels.size())   # torch.Size([60000])
plt.imshow(train_data.train_data[0].numpy(), cmap='gray')  # plt cannot display Tensors; convert to numpy first
plt.title('%i' % train_data.train_labels[0])
plt.show()
The raw images are not in the standard PyTorch training format, so wrap the dataset in a DataLoader:
train_loader = Data.DataLoader(dataset=train_data, batch_size=64, shuffle=True, num_workers=2)
train_iter = iter(train_loader)
batch_x1, batch_y1 = next(train_iter)
print(batch_x1.size())   # torch.Size([64, 1, 28, 28])
print(t.max(batch_x1))   # 1.0 -- pixels have been rescaled from 0-255 to 0-1.0
# likewise
for (batch_x, batch_y) in train_loader:
    ...
# also adds the channel dimension and rescales the pixels
| Raw Tensor data | Data iterated from the DataLoader |
|---|---|
| size: [64, 28, 28] | channel dimension added: [64, 1, 28, 28] |
| pixel range: 0-255 | pixel range: 0-1.0 |
| dtype: ByteTensor | dtype: FloatTensor |
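With the CNN and the DataLoader in place, training follows the usual pattern. A minimal sketch, not part of the original; the optimizer, learning rate, and epoch count are arbitrary choices:
cnn = CNN()
optimizer = optim.Adam(cnn.parameters(), lr=1e-3)   # lr is an arbitrary choice
loss_func = nn.CrossEntropyLoss()

for epoch in range(2):                       # epoch count is arbitrary
    for batch_x, batch_y in train_loader:
        b_x, b_y = V(batch_x), V(batch_y)    # Variable wrapping, matching the style above
        output = cnn(b_x)                    # logits of shape (64, 10)
        loss = loss_func(output, b_y)
        optimizer.zero_grad()                # clear old gradients
        loss.backward()                      # backpropagation
        optimizer.step()                     # parameter update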
Common torch functions:
torch.cat()
a=t.ones(2,2)
b=t.zeros(2,2)
c=t.cat([a,b],1)
# output:
# 1 1 0 0
# 1 1 0 0
# [torch.FloatTensor of size 2x4]
c=t.cat([a,b],0)
# output:
# 1 1
# 1 1
# 0 0
# 0 0
# [torch.FloatTensor of size 4x2]
torch.randn(2,3)
samples from a normal distribution with mean 0 and variance 1
torch.rand(3,2)
samples from a uniform distribution on [0, 1)
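A quick empirical check of both claims (sample statistics, so the numbers only approximate the theoretical values):
big = t.randn(10000)
print(big.mean(), big.std())   # close to 0 and 1
u = t.rand(10000)
print(u.min(), u.max())        # both inside [0, 1)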
torch.nn.functional.softmax(), .sum(), and torch.nn.functional.log_softmax()
data = V(t.randn(5,2))
print(data)
print(F.softmax(data,dim=1))
print(F.softmax(data,dim=1).sum(dim=1))
'''
Variable containing:
1.0147 -0.1819
0.6182 0.0393
0.9262 -0.9596
0.3518 -0.4039
1.7584 -0.4145
[torch.FloatTensor of size 5x2]
Variable containing:
0.7679 0.2321
0.6408 0.3592
0.8683 0.1317
0.6804 0.3196
0.8978 0.1022
[torch.FloatTensor of size 5x2]
Variable containing:
1
1
1
1
1
[torch.FloatTensor of size 5]
'''
torch.nn.functional.log_softmax() is equivalent to log(softmax()).
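A quick numerical check of that equivalence (log_softmax also computes this in a numerically stabler way, which is why it is typically paired with nn.NLLLoss):
data = V(t.randn(5, 2))
print(F.log_softmax(data, dim=1))
print(t.log(F.softmax(data, dim=1)))   # same values up to floating-point error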