Learning PyTorch

(I) Installation

1. Go to the official site: https://pytorch.org/
2. Copy the install command matching your environment (Stable + Windows + Conda + Python 3.6 + CPU)
3. Run the copied command in a terminal (an example is shown after this list)

4. After the installation completes, check the version (see the snippet below)
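
For the combination above, the selector generates a conda command along these lines (treat this as an illustration, since the exact command depends on the versions you pick):

conda install pytorch torchvision cpuonly -c pytorch

Once it finishes, the installed version can be checked from Python:

import torch
print(torch.__version__)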

(II) Syntax

Tensor

Basic attributes

import torch
a = torch.Tensor([[1, 2], [3, 4], [5, 6]])
print(a)
"""
tensor([[1., 2.],
        [3., 4.],
        [5., 6.]])
"""
print(a.type())  # torch.FloatTensor
print(a.size())  # same as a.shape
print(a.sum())  # tensor(21.)
print(a.numel())  # number of elements: 3*2=6
print(type(a))  # <class 'torch.Tensor'>
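
A few more attributes worth knowing (all standard torch.Tensor attributes):

import torch
a = torch.Tensor([[1, 2], [3, 4], [5, 6]])
print(a.dtype)   # torch.float32
print(a.device)  # cpu
print(a.dim())   # 2, the number of dimensions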

Other Tensor types

import torch
a = torch.zeros(size=(4, 2))
print(a.type())  # torch.FloatTensor
a = torch.IntTensor([[1, 2], [3, 4]])
print(a.type())  # torch.IntTensor
a = torch.LongTensor([[1, 2], [3, 4]])
print(a.type())  # torch.LongTensor
a = torch.DoubleTensor([[1, 2], [3, 4]])
print(a.type())  # torch.DoubleTensor

Modifying values

import torch
a = torch.zeros((2, 3))
print(a)
a[1, 1] = 3
print(a)

Converting to/from NumPy

import torch
a = torch.zeros((2, 3))
print(a)
a = a.numpy()            # Tensor -> NumPy array
print(a)
a = torch.from_numpy(a)  # NumPy array -> Tensor
print(a)
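
One detail worth knowing: numpy() and from_numpy() share the underlying memory rather than copying it, so modifying one side changes the other. A minimal demonstration:

import torch
a = torch.zeros(2, 3)
n = a.numpy()
n[0, 0] = 7      # modify the NumPy array...
print(a[0, 0])   # ...and the tensor changes too: tensor(7.)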

Creation

Constant initialization

import torch
a = torch.zeros([2, 4])
b = torch.ones([2, 4])
c = torch.full(size=[2, 4], fill_value=7)  # shape (2, 4), every element 7
print(a, b, c, sep='\n')

Random initialization

import torch
# Sample from the uniform distribution on [0, 1), shape (2, 4)
a = torch.rand(2, 4)
print(a)
a = torch.rand_like(a)  # same distribution and shape as a, i.e. torch.rand(2, 4)
print(a)
# Sample integers uniformly from [1, 3), giving a LongTensor of shape (2, 4)
a = torch.randint(1, 3, (2, 4))
print(a)
# Sample from the standard normal distribution (mean 0, variance 1), shape (2, 4)
a = torch.randn(2, 4)
print(a)

Diagonal matrix (eye)

import torch
a = torch.eye(n=3, m=4)  # 3x4 matrix: ones on the main diagonal, zeros elsewhere
print(a)

Transformations

reshape and view

Both change a tensor's shape without changing its data. view requires the underlying memory to be contiguous, while reshape falls back to copying when necessary.

import torch
a = torch.Tensor([1, 2, 3, 4, 5, 6])
b = a.reshape(1, 3, 2)
c = b.reshape(-1, 3)
d = b.view(-1, 6)
print(a)
print('\033[033m{}\033[0m'.format(a.shape))
print(b)
print('\033[033m{}\033[0m'.format(b.shape))
print(c)
print('\033[033m{}\033[0m'.format(c.shape))
print(d)
print('\033[033m{}\033[0m'.format(d.shape))
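
A minimal sketch of the difference: transposing creates a non-contiguous view, on which view() raises an error while reshape() silently copies.

import torch
a = torch.rand(2, 3)
b = a.t()                  # transpose: a non-contiguous view
print(b.is_contiguous())   # False
print(b.reshape(6).shape)  # OK: reshape copies when it must
try:
    b.view(6)              # view demands contiguous memory
except RuntimeError as e:
    print('view failed:', e)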

Adding and removing dimensions (unsqueeze and squeeze)

  • unsqueeze
import torch
a = torch.Tensor([[1, 2, 3], [4, 5, 6]])
print(a)
print('\033[033m{}\033[0m\n'.format(a.shape))
print(a.unsqueeze(0))  # insert a new dimension at position 0
print('\033[033m{}\033[0m\n'.format(a.unsqueeze(0).shape))
print(a.unsqueeze(1))  # insert a new dimension at position 1
print('\033[033m{}\033[0m\n'.format(a.unsqueeze(1).shape))
print(a.unsqueeze(-1))  # insert a new dimension at the end
print('\033[033m{}\033[0m\n'.format(a.unsqueeze(-1).shape))

  • squeeze
import torch
a = torch.Tensor(1, 28, 28, 1)
print(a.shape)
print(a.squeeze(0).shape)  # try to remove dim 0: its size is 1, so it is removed
print(a.squeeze(1).shape)  # try to remove dim 1: size is not 1, nothing happens
print(a.squeeze(2).shape)  # try to remove dim 2: size is not 1, nothing happens
print(a.squeeze(3).shape)  # try to remove dim 3: its size is 1, so it is removed
print(a.squeeze().shape)  # remove every dimension of size 1
Output:
torch.Size([1, 28, 28, 1])
torch.Size([28, 28, 1])
torch.Size([1, 28, 28, 1])
torch.Size([1, 28, 28, 1])
torch.Size([1, 28, 28])
torch.Size([28, 28])

Expanding dimensions (expand and repeat)

  • expand
import torch
# Goal: expand a to the shape of b
a = torch.Tensor([1, 2])
b = torch.rand(3, 2, 4)
# 1. Insert dimensions: (2,) -> (2, 1) -> (1, 2, 1)
a = a.unsqueeze(1).unsqueeze(0)
print(a)
print('\033[033m{}\033[0m\n'.format(a.shape))
# 2. Expand dimensions
a = a.expand(3, -1, 4)  # -1 keeps that dimension as-is (writing 2 would be equivalent)
print(a)
print('\033[033m{}\033[0m\n'.format(a.shape))

  • repeat
import torch
a = torch.Tensor([[1, 2]])
print(a)
a = a.repeat(3, 2)  # tile 3 times along dim 0 and 2 times along dim 1 (this copies the data)
print(a)
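
The practical difference: expand returns a view of the same storage (no copy), while repeat allocates new memory. A quick check via data_ptr():

import torch
a = torch.Tensor([[1, 2]])
b = a.expand(3, 2)  # a broadcasted view, no data copied
c = a.repeat(3, 1)  # a genuine copy
print(b.data_ptr() == a.data_ptr())  # True: same underlying storage
print(c.data_ptr() == a.data_ptr())  # False: freshly allocated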

Swapping dimensions (t, transpose, permute)

  • Matrix transpose (t)
import torch
a = torch.Tensor(2, 4)
b = a.t()
print(a.shape, b.shape)
  • transpose
import torch
a = torch.Tensor([[[1, 2, 3], [4, 5, 6]]])
print(a)
print('\033[033m{}\033[0m\n'.format(a.shape))
print(a.transpose(1, 2))  # swap dims 1 and 2
print('\033[033m{}\033[0m\n'.format(a.transpose(1, 2).shape))

  • permute (several transposes in one call)
import torch
a = torch.rand(10, 11, 12, 13)
print(a.shape)
print(a.permute(1, 3, 0, 2).shape)

Output:
torch.Size([10, 11, 12, 13])
torch.Size([11, 13, 10, 12])

broadcast_tensors

Automatic shape expansion: broadcasting performs the necessary unsqueeze and expand steps implicitly, so that the tensors end up with the same shape.

import torch
a = torch.Tensor([[1, 2]])    # shape (1, 2)
b = torch.Tensor([[1], [2]])  # shape (2, 1)
c = torch.broadcast_tensors(a, b, a @ b)  # all three broadcast to shape (2, 2)
print(*c, sep='\n')
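
Broadcasting also happens implicitly in ordinary arithmetic, so the explicit call is rarely needed:

import torch
a = torch.Tensor([[1, 2]])    # shape (1, 2)
b = torch.Tensor([[1], [2]])  # shape (2, 1)
print(a + b)  # both operands broadcast to (2, 2) before the addition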

Operations

Matrix multiplication

import torch
a = torch.Tensor([[1, 2]])
b = torch.Tensor([[1], [2]])
print(a @ b)  # matrix product, same as torch.matmul(a, b)
print(a * b)  # elementwise product with broadcasting, same as torch.mul(a, b)

Matrix multiplication (more than two dimensions)

Multiplication is applied over the last two dimensions only; the leading (batch) dimensions must match, or at least be broadcastable.

import torch
a = torch.rand(2, 4, 32, 99)
b = torch.rand(2, 4, 99, 64)
print(torch.matmul(a, b).shape)
Output:
torch.Size([2, 4, 32, 64])
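
The batch dimensions follow the usual broadcasting rules, so they need not be literally identical; a sketch:

import torch
a = torch.rand(2, 1, 32, 99)
b = torch.rand(4, 99, 64)
# the batch dims (2, 1) and (4,) broadcast to (2, 4)
print(torch.matmul(a, b).shape)  # torch.Size([2, 4, 32, 64])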

Concatenation and splitting

cat

import torch
a = torch.rand(3, 2, 4)
b = torch.rand(3, 2, 5)
print(torch.cat([a, b], dim=2).shape)  # concatenate along dim 2; all other dims must match
Output:
torch.Size([3, 2, 9])

stack

Unlike cat, stack inserts a new dimension at the given position, so all input tensors must have exactly the same shape.

import torch
a = torch.Tensor([[2, 2, 2], [3, 3, 3], [4, 4, 4]])
b = torch.Tensor([[5, 5, 5], [6, 6, 6], [7, 7, 7]])
# print(torch.stack(tensors=[a, b], dim=0))
# print(torch.stack(tensors=[a, b], dim=1))
# print(torch.stack(tensors=[a, b], dim=2))
print(torch.stack(tensors=[a, b], dim=0).shape)
print(torch.stack(tensors=[a, b], dim=1).shape)
print(torch.stack(tensors=[a, b], dim=2).shape)
Output:
torch.Size([2, 3, 3])
torch.Size([3, 2, 3])
torch.Size([3, 3, 2])

split

import torch
a = torch.rand(9, 6, 3, 32)
a1, a2, a3 = a.split(split_size=2, dim=1)  # split along dim 1 into chunks of length 2
print(a1.shape, a1.shape == a2.shape == a3.shape)
Output:
torch.Size([9, 2, 3, 32]) True

chunk

import torch
c = torch.rand(5, 7, 3)
c1, c2, c3 = c.chunk(chunks=3, dim=1)  # split dim 1 into 3 chunks, as evenly as possible
print(c1.shape, c2.shape, c3.shape, sep='\n')
Output:
torch.Size([5, 3, 3])
torch.Size([5, 3, 3])
torch.Size([5, 1, 3])

Indexing and slicing

from torch import ones
a = ones((5, 15, 25, 35))
print(a[0].size())  # torch.Size([15, 25, 35])
print(a[0, 0].shape)  # torch.Size([25, 35])
print(a[:, 0, :, 0].shape)  # torch.Size([5, 25])
print(a[1:4, 0, :, 0:34:2].shape)  # torch.Size([3, 25, 17])
Output:
torch.Size([15, 25, 35])
torch.Size([25, 35])
torch.Size([5, 25])
torch.Size([3, 25, 17])

import torch
a = torch.IntTensor([[0, -1, -2],
                     [-3, 4, -5]])
b = a.ge(0)  # elementwise "greater than or equal to 0": a boolean mask
c = torch.masked_select(a, b)  # 1-D tensor of the elements where the mask is True
print(a, b, c, sep='\n')
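
A related selection routine from the standard API, torch.where, keeps the original shape instead of flattening; a small sketch:

import torch
a = torch.IntTensor([[0, -1, -2],
                     [-3, 4, -5]])
print(torch.where(a >= 0, a, torch.zeros_like(a)))  # negatives replaced by 0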

(III) Convolutional Neural Network [minimal]

from torchvision import transforms
from torchvision.datasets import CIFAR10
from torch.utils.data.dataloader import DataLoader
from torch import nn
from torch.nn import functional as F
from torch import optim
import torch

"""内置数据集下载,并转为Tensor"""
transform = transforms.Compose([transforms.ToTensor()])
dataset_train = CIFAR10(root='data', train=True, download=True, transform=transform)
dataset_test = CIFAR10(root='data', train=False, download=True, transform=transform)

"""将数据集装入数据加载器,设置batch_size"""
loader_train = DataLoader(dataset_train, batch_size=16)
loader_test = DataLoader(dataset_test, batch_size=16)


class Net(nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        self.conv1 = nn.Conv2d(in_channels=3, out_channels=15, kernel_size=3, padding=1)
        self.pool = nn.MaxPool2d(kernel_size=2, stride=2)
        self.conv2 = nn.Conv2d(in_channels=15, out_channels=30, kernel_size=3, padding=1)
        self.fc1 = nn.Linear(in_features=30 * 8 * 8, out_features=300)
        self.fc2 = nn.Linear(in_features=300, out_features=10)

    def forward(self, x):
        x = self.pool(F.relu(self.conv1(x)))
        x = self.pool(F.relu(self.conv2(x)))
        x = x.view(-1, 30 * 8 * 8)
        x = F.relu(self.fc1(x))
        x = self.fc2(x)
        return x


"""神经网络、损失函数、优化器"""
net = Net()
cross_entropy_loss = nn.CrossEntropyLoss()
optimizer = optim.SGD(params=net.parameters(), lr=1e-3, momentum=.9)

"""训练"""
for epoch in range(2):
    running_loss = 0.
    for inputs, labels in loader_train:
        # 参数梯度清零
        optimizer.zero_grad()
        # 前向传播
        outputs = net(inputs)
        # 交叉熵损失
        loss = cross_entropy_loss(outputs, labels)
        # 反向传播
        loss.backward()
        # 参数更新
        optimizer.step()
        # 累计损失
        running_loss += loss.item()
    print('第%d轮损失值:%.2f' % (epoch + 1, running_loss))

"""准确度"""
correct, total = 0, 0
with torch.no_grad():  # 禁止梯度计算,以节省内存
    for images, labels in loader_test:
        outputs = net(images)
        max_values, max_indexes = torch.max(outputs.data, dim=1)
        total += labels.size(0)
        correct += (max_indexes == labels).sum().item()
print('10000个样本的准确率:%d%%' % (100 * correct / total))

1. Data loading

1.1 Downloading a built-in dataset + preprocessing

from torchvision.datasets import CIFAR10
from torchvision import transforms
transform = transforms.Compose([transforms.ToTensor()])  # preprocessing: convert PIL images to Tensors
dataset = CIFAR10(root='data', train=True, download=True, transform=transform)  # download the dataset

1.2 DataLoader

from torch.utils.data.dataloader import DataLoader
data_loader = DataLoader(dataset, batch_size=8)
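
For training you would normally also shuffle the data, and optionally load batches in parallel; both are standard DataLoader parameters (on Windows, num_workers > 0 requires the usual if __name__ == '__main__' guard):

from torch.utils.data.dataloader import DataLoader
data_loader = DataLoader(dataset, batch_size=8, shuffle=True, num_workers=2)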

1.3 Inspecting the dataset

import matplotlib.pyplot as mp, numpy as np
from torchvision.utils import make_grid
images, labels = next(iter(data_loader))
print(images.shape)
classes = ('plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck')
print('  '.join(classes[i] for i in labels))
mp.imshow(np.transpose(make_grid(images).numpy(), (1, 2, 0)))
mp.show()

Output:
torch.Size([8, 3, 32, 32])
frog truck truck deer car car bird horse

2. Building the network

A model is defined by subclassing the base class nn.Module, so some basic object-oriented knowledge is assumed.

from torch import nn
from torch.nn import functional as F

class Net(nn.Module):
    def __init__(self):
        super(Net, self).__init__()

    def forward(self, x):
        return x

Override the forward pass; below are three equivalent ways to write the same network:

  • Method 1
class Net(nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        self.conv1 = nn.Conv2d(in_channels=3, out_channels=15, kernel_size=3, padding=1)
        self.pool = nn.MaxPool2d(kernel_size=2, stride=2)
        self.conv2 = nn.Conv2d(in_channels=15, out_channels=30, kernel_size=3, padding=1)
        self.fc1 = nn.Linear(in_features=30 * 8 * 8, out_features=300)
        self.fc2 = nn.Linear(in_features=300, out_features=10)

    def forward(self, x):
        x = self.pool(F.relu(self.conv1(x)))
        x = self.pool(F.relu(self.conv2(x)))
        x = x.view(-1, 30 * 8 * 8)
        x = F.relu(self.fc1(x))
        x = self.fc2(x)
        return x
  • Method 2
class Net(nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        self.conv = nn.Sequential()
        self.conv.add_module('c1', nn.Conv2d(in_channels=3, out_channels=15, kernel_size=3, padding=1))
        self.conv.add_module('r1', nn.ReLU(inplace=True))
        self.conv.add_module('p1', nn.MaxPool2d(kernel_size=2, stride=2))
        self.conv.add_module('c2', nn.Conv2d(in_channels=15, out_channels=30, kernel_size=3, padding=1))
        self.conv.add_module('r2', nn.ReLU(inplace=True))
        self.conv.add_module('p2', nn.MaxPool2d(kernel_size=2, stride=2))
        self.linear = nn.Sequential()
        self.linear.add_module('l1', nn.Linear(in_features=30 * 8 * 8, out_features=300))
        self.linear.add_module('r3', nn.ReLU(inplace=True))
        self.linear.add_module('l2', nn.Linear(in_features=300, out_features=10))

    def forward(self, x):
        x = self.conv(x)
        x = x.view(-1, 30 * 8 * 8)
        x = self.linear(x)
        return x
  • Method 3
class Net(nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        self.conv = nn.Sequential(
            nn.Conv2d(in_channels=3, out_channels=15, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=2, stride=2),
            nn.Conv2d(in_channels=15, out_channels=30, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=2, stride=2),
        )
        self.linear = nn.Sequential(
            nn.Linear(in_features=30 * 8 * 8, out_features=300),
            nn.ReLU(inplace=True),
            nn.Linear(in_features=300, out_features=10),
        )

    def forward(self, x):
        x = self.conv(x)
        x = x.view(-1, 30 * 8 * 8)
        x = self.linear(x)
        return x
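
Whichever style you pick, instantiating the model and printing it lists the registered submodules, which makes a quick sanity check:

net = Net()
print(net)  # nn.Module's repr shows every registered layer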

3. Loss function and optimizer

from torch import optim
net = Net()  # instantiate the model
cross_entropy_loss = nn.CrossEntropyLoss()  # cross-entropy loss
optimizer = optim.SGD(params=net.parameters(), lr=1e-3, momentum=.9)  # stochastic gradient descent with momentum

4. Training

for epoch in range(3):
    loss_value = 0.
    for inputs, labels in loader_train:
        # zero the parameter gradients
        optimizer.zero_grad()
        # forward pass
        outputs = net(inputs)
        # cross-entropy loss
        loss = cross_entropy_loss(outputs, labels)
        # backward pass
        loss.backward()
        # update the parameters
        optimizer.step()
        # accumulate the loss
        loss_value += loss.item()
    print('Epoch %d loss: %.2f' % (epoch + 1, loss_value))
Output:
Epoch 1 loss: 6134.84
Epoch 2 loss: 4787.88
Epoch 3 loss: 4185.97

5. Evaluation

correct, total = 0, 0
with torch.no_grad():  # disable gradient tracking to save memory
    for images, labels in loader_test:
        outputs = net(images)
        max_values, max_indexes = torch.max(outputs.data, dim=1)
        total += labels.size(0)
        correct += (max_indexes == labels).sum().item()
print('Accuracy on 10000 test images: %d%%' % (100 * correct / total))
Output:
Accuracy on 10000 test images: 54%

(IV) Appendix

Glossary (English to Chinese)

  • squeeze: 挤压
  • permute: 交换
  • broadcast: 广播
  • chunk: 厚块
  • vision: 视力
  • gradient: 梯度