Getting Started with PyTorch

Tensors

Creating Tensors

  • 1. torch.empty creates an uninitialized tensor; its entries are whatever values happened to be in the allocated memory, so the printout varies from run to run
import torch
x = torch.empty(5,3)
print(x)


  • 2. rand(*size, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor returns a tensor filled with random numbers drawn from the uniform distribution on the interval [0, 1)
import torch
x = torch.rand(2, 3)
print(x)


  • 3. randn(*size, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor returns a tensor filled with random numbers drawn from a normal distribution with mean 0 and variance 1 (the standard normal distribution)
import torch
x = torch.randn(2, 3)
print(x)


  • 4. normal(mean, std, *, generator=None, out=None) -> Tensor returns values drawn from the normal distribution with the given mean and standard deviation; the example below samples a 1×4 tensor with mean 2 and std 3
import torch
x = torch.normal(2, 3, size=(1, 4))
print(x)

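mean and std can themselves be tensors, in which case each output element is drawn from its own distribution. A minimal sketch, following the pattern in the official documentation:

import torch
# five samples, each with its own mean (1 through 5) and std (1.0 down to 0.2)
x = torch.normal(mean=torch.arange(1., 6.), std=torch.arange(1., 0., -0.2))
print(x)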

  • 5. linspace(start, end, steps=100, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor
    returns steps values evenly spaced between start and end, both endpoints included
import torch
x = torch.linspace(3, 10, steps=5)
print(x)

tensor([ 3.0000,  4.7500,  6.5000,  8.2500, 10.0000])
  • **torch.zeros(2, 3)** creates a tensor filled with zeros (torch.ones works the same way for a tensor of ones)
import torch
x = torch.zeros(2, 3)
print(x)
tensor([[0., 0., 0.],
        [0., 0., 0.]])
  • **torch.tensor([[0.1, 1.2], [2.2, 3.1], [4.9, 5.2]])** converts existing data (here a nested list) into a tensor
x = torch.tensor([[0.1, 1.2], [2.2, 3.1], [4.9, 5.2]])
print(x)
tensor([[0.1000, 1.2000],
        [2.2000, 3.1000],
        [4.9000, 5.2000]])
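A note on dtypes: torch.tensor infers the element type from its input, so integer data produces torch.int64 while floating-point data (as above) produces torch.float32:
x = torch.tensor([1, 2, 3])
print(x.dtype)
torch.int64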
  • Creating a tensor with the same shape as an existing one: randn_like reuses the shape of its argument, overriding only the properties you pass (here the dtype)
import torch
x = torch.ones(5,5, dtype=torch.double)
print(x)
x = torch.randn_like(x,dtype=torch.float)
print(x)
tensor([[1., 1., 1., 1., 1.],
        [1., 1., 1., 1., 1.],
        [1., 1., 1., 1., 1.],
        [1., 1., 1., 1., 1.],
        [1., 1., 1., 1., 1.]], dtype=torch.float64)
tensor([[ 0.2423,  1.7955, -0.5171,  1.1406, -0.5407],
        [-1.1997,  0.1341, -1.2628, -1.4078, -0.3429],
        [ 1.1838,  0.4697,  0.2860, -0.0708,  0.2232],
        [-0.0636, -0.1558, -0.3082, -0.2701, -0.5039],
        [ 0.9021, -1.5196, -0.8247, -1.4582,  0.8148]])
  • Getting the shape of a tensor with x.size()
print(x.size())
torch.Size([5, 5])
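x.shape is an alias for x.size() and returns the same torch.Size object:
print(x.shape)
torch.Size([5, 5])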

Tensor Operations

  • Addition, in three equivalent forms (see the note after the outputs below)
y = torch.rand(5,5)
# addition, form 1: the + operator
print(x + y)
# addition, form 2: torch.add
print(torch.add(x, y))
# addition, form 3: in-place, mutating y
y.add_(x)
print(y)

tensor([[-0.5748,  0.5915, -0.1207,  0.9979, -0.0416],
        [ 1.1396,  0.4685,  0.9498,  1.1800, -2.0215],
        [ 0.3359, -1.4478, -1.2943,  0.4292,  0.1471],
        [-0.3918,  0.5140, -1.2918,  0.4770, -0.1626],
        [ 0.4617,  2.1801,  0.1918,  0.0635,  2.1010]])
tensor([[-0.5748,  0.5915, -0.1207,  0.9979, -0.0416],
        [ 1.1396,  0.4685,  0.9498,  1.1800, -2.0215],
        [ 0.3359, -1.4478, -1.2943,  0.4292,  0.1471],
        [-0.3918,  0.5140, -1.2918,  0.4770, -0.1626],
        [ 0.4617,  2.1801,  0.1918,  0.0635,  2.1010]])
tensor([[-0.5748,  0.5915, -0.1207,  0.9979, -0.0416],
        [ 1.1396,  0.4685,  0.9498,  1.1800, -2.0215],
        [ 0.3359, -1.4478, -1.2943,  0.4292,  0.1471],
        [-0.3918,  0.5140, -1.2918,  0.4770, -0.1626],
        [ 0.4617,  2.1801,  0.1918,  0.0635,  2.1010]])
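Note the naming convention here: any operation suffixed with an underscore, such as add_, mutates its tensor in place, which is why the third form leaves the sum stored in y.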
  • Directing the result into a given output tensor with the out parameter; NumPy-style indexing such as x[:, 1] also works
result = torch.empty(5, 5)
torch.add(x, y, out=result)
print(result)
print(x)
print(x[:, 1])
tensor([[-0.9804,  0.1685,  0.8569,  2.0596,  0.2225],
        [ 1.0814,  1.4838,  0.5020,  1.5157,  0.8952],
        [-0.3903,  1.9247,  0.8741,  1.9728,  0.2694],
        [ 1.1786, -0.7924, -0.8760,  0.3188,  1.7267],
        [ 1.6438,  1.4482,  0.7289,  0.9638,  0.8927]])
tensor([[ 0.2748, -0.0774, -1.2880, -0.0365,  0.8686],
        [ 0.1119,  0.8125,  1.2119,  0.5379, -0.2646],
        [-0.1414,  0.8118, -1.0719, -0.5585,  0.0922],
        [ 0.1249,  0.5992, -0.7017,  0.0810, -0.2659],
        [-0.6018, -0.3716,  2.8113,  0.7393, -0.6450]])
tensor([-0.0774,  0.8125,  0.8118,  0.5992, -0.3716])
  • Changing the shape of a tensor with view
x = torch.randn(4, 4)
y = x.view(16)
z = x.view(-1, 4)  # the size -1 is inferred from other dimensions
print(x.size(), y.size(), z.size())
torch.Size([4, 4]) torch.Size([16]) torch.Size([4, 4])
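A one-element tensor can be unwrapped into a plain Python number with .item(); reshape is also worth knowing as a more forgiving alternative to view when the memory layout is not contiguous:
x = torch.randn(1)
print(x)
print(x.item())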

Tensor Conversions

On the CPU, a tensor and the NumPy array converted from it share the same physical memory, so modifying one modifies the other.

  • Converting a tensor to a NumPy array
a = torch.ones(5)
print(a)
"""
tensor([1., 1., 1., 1., 1.])
"""
b = a.numpy()
print(b)
"""
[1. 1. 1. 1. 1.]
"""
a.add_(1)
print(a)
print(b)
"""
tensor([2., 2., 2., 2., 2.])
[2. 2. 2. 2. 2.]
"""
  • Converting a NumPy array to a PyTorch tensor
import numpy as np
a = np.ones(5)
b = torch.from_numpy(a)
np.add(a, 1, out=a)
print(a)
print(b)
"""
[2. 2. 2. 2. 2.]
tensor([2., 2., 2., 2., 2.], dtype=torch.float64)
"""
  • Moving data to and from the GPU
# let us run this cell only if CUDA is available
# We will use ``torch.device`` objects to move tensors in and out of GPU


if torch.cuda.is_available():
    device = torch.device("cuda")          # a CUDA device object
    y = torch.ones_like(x, device=device)  # directly create a tensor on GPU
    x = x.to(device)                       # or just use strings ``.to("cuda")``
    z = x + y
    print(z)
    print(z.to("cpu", torch.double))       # ``.to`` can also change dtype together!
"""
tensor([-0.4743], device='cuda:0')
tensor([-0.4743], dtype=torch.float64)
"""

First Steps with Transforms

from PIL import Image
from torch.utils.tensorboard import SummaryWriter
from torchvision import transforms

# create a TensorBoard writer and open the image as a PIL Image
writer = SummaryWriter("logs")
img = Image.open("image/648.jpg")
print(img)

# ToTensor: convert the PIL image to a tensor and display it in TensorBoard
trans_totensor = transforms.ToTensor()
img_tensor = trans_totensor(img)
writer.add_image("Totensor", img_tensor)

# Normalize
# output[channel] = (input[channel] - mean[channel]) / std[channel]
print(img_tensor[0][0][1])  # channel 0, row 0, column 1
trans_norm = transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])
img_norm = trans_norm(img_tensor)
print(img_norm[0][0][1])
writer.add_image("Normalize", img_norm)


# Resize
print(img.size)
# img (PIL) -> Resize -> img_resize (PIL)
trans_resize = transforms.Resize((512, 512))
img_resize = trans_resize(img)
# img_resize (PIL) -> ToTensor -> img_resize (tensor):
# Resize returns a PIL image, which must be converted to a tensor before TensorBoard can display it
img_resize = trans_totensor(img_resize)

writer.add_image("Resize", img_resize, 0)


# Compose: Resize followed by ToTensor
# an int argument resizes the shorter edge to 512 while keeping the aspect ratio
trans_resize_2 = transforms.Resize(512)
# PIL -> PIL -> tensor
trans_compose = transforms.Compose([trans_resize_2, trans_totensor])
img_resize_2 = trans_compose(img)
writer.add_image("Resize", img_resize_2, 1)

# RandomCrop: crop a random 255x100 region on each call
trans_random = transforms.RandomCrop((255, 100))
trans_compose_2 = transforms.Compose([trans_random, trans_totensor])
for i in range(10):
    img_crop = trans_compose_2(img)
    writer.add_image("RandomCrop", img_crop, i)

writer.close()
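To inspect the logged images, launch TensorBoard from the project directory (assuming TensorBoard is installed) and open the URL it prints:

tensorboard --logdir=logs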
### A Complete Starter Example

#### Loading Libraries and Preparing the Dataset

To begin training a machine learning model with PyTorch, the first step is loading the necessary libraries. This includes `torch` and its submodule `nn` (neural networks), along with `datasets` for fetching standard image-classification datasets and the `transforms` conversion utilities.

```python
import torch
from torch import nn
from torch.utils.data import DataLoader
from torchvision import datasets, transforms
```

Next, define the transform chain that preprocesses the input images; here we simply convert them to tensors:

```python
transform = transforms.ToTensor()
```

Create Dataset objects that hold the samples and their labels, initializing them with a root directory, a train/test flag, and the transform; then wrap each dataset in a DataLoader to fetch batched training and validation data later on.

```python
training_data = datasets.FashionMNIST(
    root="data",
    train=True,
    download=True,
    transform=transform,
)

test_data = datasets.FashionMNIST(
    root="data",
    train=False,
    download=True,
    transform=transform,
)
```

With these components built, iterators with a batch size of 64 are easy to obtain:

```python
train_dataloader = DataLoader(training_data, batch_size=64)
test_dataloader = DataLoader(test_data, batch_size=64)
```

#### Defining the Model

The next task is designing the network architecture; simple fully connected layers are used here to demonstrate the basic approach. Real applications may combine convolution, pooling, and other more complex units.

```python
device = 'cuda' if torch.cuda.is_available() else 'cpu'

class NeuralNetwork(nn.Module):
    def __init__(self):
        super(NeuralNetwork, self).__init__()
        self.flatten = nn.Flatten()              # flatten the image into a feature vector
        self.linear_relu_stack = nn.Sequential(  # a sequence of linear layers
            nn.Linear(28*28, 512),
            nn.ReLU(),                           # non-linear activation
            nn.Linear(512, 512),
            nn.ReLU(),
            nn.Linear(512, 10),                  # output dimension equals the number of classes
        )

    def forward(self, x):                        # forward-pass logic
        x = self.flatten(x)
        logits = self.linear_relu_stack(x)
        return logits

model = NeuralNetwork().to(device)
print(model)  # inspect the model configuration on the current device
```

This code accomplishes two things: it subclasses `Module`, overriding the constructor and the feed-forward logic, and it selects the execution platform automatically based on the available hardware.

#### Setting Up the Training Loop

With the groundwork laid, the complete training procedure can now be written:

```python
loss_fn = nn.CrossEntropyLoss()                            # choose the loss function
optimizer = torch.optim.SGD(model.parameters(), lr=1e-3)

def train(dataloader, model, loss_fn, optimizer):
    size = len(dataloader.dataset)
    for batch, (X, y) in enumerate(dataloader):
        X, y = X.to(device), y.to(device)

        pred = model(X)              # model prediction
        loss = loss_fn(pred, y)      # compute the error

        optimizer.zero_grad()        # clear stale gradients so they do not accumulate
        loss.backward()              # backpropagate the error signal via autograd
        optimizer.step()             # adjust the weights

        if batch % 100 == 0:
            current = batch * len(X)
            print(f"loss: {loss.item():>7f}  [{current:>5d}/{size:>5d}]")

epochs = 5  # number of passes over the training data (unspecified in the original; 5 is a typical choice)
for t in range(epochs):
    print(f"Epoch {t+1}\n-------------------------------")
    train(train_dataloader, model, loss_fn, optimizer)
print("Done!")
```

This part shows how to set the optimizer and its hyperparameters and then iterate over the epochs until training reaches a stable state.

#### Evaluating Performance

Once the preparation is complete, the results need to be checked. A simple evaluation routine:

```python
def test(dataloader, model, loss_fn):
    size = len(dataloader.dataset)
    num_batches = len(dataloader)
    model.eval()                     # disable dropout and other training-only behavior
    test_loss, correct = 0, 0
    with torch.no_grad():            # no gradients are needed for evaluation
        for X, y in dataloader:
            X, y = X.to(device), y.to(device)
            pred = model(X)
            test_loss += loss_fn(pred, y).item()
            correct += (pred.argmax(1) == y).type(torch.float).sum().item()
    test_loss /= num_batches
    correct /= size
    print(f"Test Error: \n Accuracy: {(100*correct):>0.1f}%, Avg loss: {test_loss:>8f} \n")

test(test_dataloader, model, loss_fn)
```

This snippet reports the accuracy and the average cross-entropy loss, with dropout and other sources of randomness switched off.