PyTorch入门教程

TensorBoard

from torch.utils.tensorboard import SummaryWriter

# Log the series y = x as 100 scalar points into ./logs so TensorBoard can
# plot it (launch with: tensorboard --logdir=logs --port=6007).
writer = SummaryWriter("logs")

# writer.add_image()
for step in range(100):
    writer.add_scalar("y = x", step, step)

writer.close()
# 终端
tensorboard --logdir=logs --port=6007

torchvision.transforms

  1. Totensor
    # ToTensor converts a PIL image / ndarray into a CHW float tensor in [0, 1].
    # NOTE(review): `transforms`, `img` and `writer` are defined in earlier
    # tutorial code that is not shown in this snippet.
    trans_totensor = transforms.ToTensor()
    img_tensor = trans_totensor(img)
    writer.add_image("Totensor",img_tensor)
    
  2. Normalize
    # Normalize per channel: output[c] = (input[c] - mean[c]) / std[c].
    print(img_tensor[0][0][0])
    trans_norm = transforms.Normalize([6,3,2],[9,3,5])
    img_norm = trans_norm(img_tensor)
    print(img_norm[0][0][0])
    writer.add_image("Normalize",img_norm,2)
    
  3. Resize
    # img_PIL --> resize --> img_resize_PIL --> ToTensor --> img_resize_Tensor
    # NOTE(review): `trans_resie` / `img_reszie` are misspellings of "resize",
    # kept as-is because they are used consistently below.
    
    trans_resie = transforms.Resize((512,512))
    img_reszie = trans_resie(img)
    img_reszie = trans_totensor(img_reszie)
    
  4. Compose - reszie - 2
    # Compose chains transforms: Resize(512) with a single int scales the
    # shorter edge to 512 (keeping aspect ratio), then ToTensor converts.
    
    trans_resie_2 = transforms.Resize(512)
    trans_compose = transforms.Compose([trans_resie_2,trans_totensor])
    img_reszie_2 = trans_compose(img)
    

torchvision

import torchvision
from tensorboardX import SummaryWriter

# Every sample fetched from the datasets below is converted to a tensor.
dataset_transform = torchvision.transforms.Compose([
    torchvision.transforms.ToTensor()
])

## Download the CIFAR10 train and test splits into ./dataset
train_set = torchvision.datasets.CIFAR10(root="./dataset", train=True,
                                         transform=dataset_transform, download=True)
test_set = torchvision.datasets.CIFAR10(root="./dataset", train=False,
                                        transform=dataset_transform, download=True)

# print(test_set[0])
# print(test_set.classes)
# img, target = test_set[0]
writer = SummaryWriter("logs")

# Log the first ten test images under the tag "test_set".
for idx in range(10):
    image, label = test_set[idx]
    writer.add_image("test_set", image, idx)

writer.close()

torch.utils.data.DataLoader

import torchvision
from tensorboardX import SummaryWriter
from torch.utils.data import DataLoader
# from torch.utils.tensorboard import SummaryWriter

test_data = torchvision.datasets.CIFAR10(root="./dataset", train=False,
                                         transform=torchvision.transforms.ToTensor())
# DataLoader arguments:
#   dataset     -> where the samples come from
#   batch_size  -> how many samples per batch
#   shuffle     -> randomize sample order each epoch
#   num_workers -> number of loader subprocesses (0 = load in the main process)
#   drop_last   -> drop the final batch when it is smaller than batch_size
test_loader = DataLoader(dataset=test_data, batch_size=64, shuffle=True,
                         num_workers=0, drop_last=True)


# First image of the test set and its class index
first_img, first_target = test_data[0]
print(first_img.shape)
print(first_target)

writer = SummaryWriter("dataloader")
for epoch in range(2):
    # add_images (plural) is required when logging a whole batch of images.
    for step, (imgs, targets) in enumerate(test_loader):
        # print(imgs.shape)
        # print(targets)
        writer.add_images("Epoch:{}".format(epoch), imgs, step)

writer.close()

torch.nn.Module → 搭建神经网络

import torch
import torchvision
from tensorboardX import SummaryWriter
from torch import nn
from torch.nn import Conv2d
from torch.utils.data import DataLoader

## Load the CIFAR10 training split; download=False assumes the files are
## already present under ../dataset.
dataset = torchvision.datasets.CIFAR10("../dataset", train=True,
                                       transform=torchvision.transforms.ToTensor(),
                                       download=False)

dataloader = DataLoader(dataset, batch_size=64)

## Build the network: a single convolution layer.
class Module(nn.Module):
    """Toy network that applies one 3 -> 6 channel, 3x3 convolution."""

    def __init__(self):
        super(Module, self).__init__()
        # kernel 3, stride 1, no padding: a 32x32 input shrinks to 30x30.
        self.conv1 = Conv2d(in_channels=3, out_channels=6,
                            kernel_size=3, stride=1, padding=0)

    def forward(self, x):
        return self.conv1(x)

writer = SummaryWriter("../logs")
## Instantiate the network
MyModule = Module()

for step, (imgs, targets) in enumerate(dataloader):
    # output is [64, 6, 30, 30]; add_images cannot render 6 channels, so fold
    # the surplus channels into the batch dimension -> [-1, 3, 30, 30].
    output = torch.reshape(MyModule(imgs), (-1, 3, 30, 30))
    print("input:", imgs.shape)
    print("output:", output.shape)
    writer.add_images("input", imgs, step)
    writer.add_images("output", output, step)

writer.close()

Pooling layers

import torch
import torchvision.datasets
from tensorboardX import SummaryWriter
from torch import nn
from torch.nn import MaxPool2d
from torch.utils.data import Dataset, DataLoader

## Load the CIFAR10 test split (download=False: ../dataset must already exist).
dataset = torchvision.datasets.CIFAR10("../dataset", train=False,
                                       transform=torchvision.transforms.ToTensor(),
                                       download=False)
dataloader = DataLoader(dataset, batch_size=64)

# Hand-made 5x5 example kept for checking the pooling output by hand:
# input = torch.tensor([[1,2,0,3,1],
#                       [0,1,2,3,1],
#                       [1,2,1,0,0],
#                       [5,2,3,1,1],
#                       [2,1,0,1,1]],dtype=torch.float32)
# print(input.shape)
# input = torch.reshape(input,(-1,1,5,5))
# print(input.shape)

# Build the network: one max-pooling layer.
class Module(nn.Module):
    """Applies 3x3 max pooling; ceil_mode=True keeps partial edge windows."""

    def __init__(self):
        super(Module, self).__init__()
        self.maxpool1 = MaxPool2d(kernel_size=3, ceil_mode=True)

    def forward(self, input):
        return self.maxpool1(input)


mymodule = Module()
writer = SummaryWriter("../logs")
# Pooling keeps the channel count, so its output can be logged directly.
for step, (imgs, targets) in enumerate(dataloader):
    output = mymodule(imgs)
    print(imgs.shape)
    print(output.shape)
    writer.add_images("input", imgs, step)
    writer.add_images("output", output, step)

writer.close()

Convolution Layers

import torch
import torchvision
from tensorboardX import SummaryWriter
from torch import nn
from torch.nn import Conv2d
from torch.utils.data import DataLoader

## Prepare the CIFAR10 test split; the data must already be in ../dataset.
dataset = torchvision.datasets.CIFAR10(
    "../dataset",
    train=False,
    transform=torchvision.transforms.ToTensor(),
    download=False,
)

dataloader = DataLoader(dataset, batch_size=64)

## Build the network
class Module(nn.Module):
    """One-layer CNN: 3 input channels -> 6 output channels, 3x3 kernel."""

    def __init__(self):
        super(Module, self).__init__()
        # No padding: spatial size shrinks from 32x32 to 30x30.
        self.conv1 = Conv2d(in_channels=3, out_channels=6, kernel_size=3,
                            stride=1, padding=0)

    def forward(self, x):
        x = self.conv1(x)
        return x

writer = SummaryWriter("../logs")
## Instantiate the network
MyModule = Module()
step = 0

for imgs, targets in dataloader:
    output = MyModule(imgs)
    ## [64, 6, 30, 30] -> [-1, 3, 30, 30] so add_images can display 3-channel
    ## images (the extra channels are folded into the batch dimension).
    output = torch.reshape(output, (-1, 3, 30, 30))
    print("input:", imgs.shape)
    print("output:", output.shape)
    writer.add_images("input", imgs, step)
    writer.add_images("output", output, step)
    step += 1

writer.close()

Linear Layers

import torch
import torchvision.datasets
from torch import nn
from torch.nn import Linear
from torch.utils.data import DataLoader

# drop_last=True: the Linear layer used later expects exactly one full
# batch of 64 images (64 * 3 * 32 * 32 = 196608 flattened values).
dataset = torchvision.datasets.CIFAR10("../dataset", train=False,
                                       transform=torchvision.transforms.ToTensor(),
                                       download=False)
dataloader = DataLoader(dataset, batch_size=64, drop_last=True)

class Module(nn.Module):
    """Single fully-connected layer: 196608 inputs -> 10 outputs.

    196608 = 64 * 3 * 32 * 32, i.e. one whole flattened CIFAR10 batch.
    """

    def __init__(self):
        super(Module, self).__init__()
        self.linear1 = Linear(in_features=196608, out_features=10)

    def forward(self, input):
        return self.linear1(input)

mymodule = Module()

for imgs, targets in dataloader:
    print("input", imgs.shape)
    # imgs = torch.reshape(imgs,(1,1,1,-1))
    # Flatten the WHOLE batch into one vector of 64*3*32*32 = 196608 values,
    # matching the in_features of the Linear layer above.
    # (Renamed from `input`, which shadowed the builtin of the same name.)
    flattened = torch.flatten(imgs)
    print("input", flattened.shape)
    output = mymodule(flattened)
    print(output.shape)

Loss Functions

import torchvision.datasets
from torch import nn
from torch.nn import Sequential, Conv2d, MaxPool2d, Flatten, Linear
from torch.utils.data import DataLoader

# Prepare the data (download=False: ../dataset must already contain CIFAR10).
dataset = torchvision.datasets.CIFAR10("../dataset", train=False,
                                       transform=torchvision.transforms.ToTensor(),
                                       download=False)
dataloader = DataLoader(dataset, batch_size=64)

# Build the network (the standard CIFAR10 tutorial architecture).
class CIFAR10_Module(nn.Module):
    """CNN for 32x32 RGB images: three conv+pool stages, then two FC layers.

    Shapes: [N,3,32,32] -> [N,32,16,16] -> [N,32,8,8] -> [N,64,4,4]
            -> flatten to 1024 -> 64 -> 10 class scores.
    """

    def __init__(self):
        super(CIFAR10_Module, self).__init__()
        self.module1 = Sequential(
            Conv2d(3, 32, 5, padding=2),
            MaxPool2d(2),
            Conv2d(32, 32, 5, padding=2),
            MaxPool2d(2),
            Conv2d(32, 64, 5, padding=2),
            MaxPool2d(2),
            Flatten(),
            Linear(1024, 64),
            Linear(64, 10),
        )

    def forward(self, x):
        return self.module1(x)
# Instantiate the network and score every batch with cross-entropy loss.
cifar_module = CIFAR10_Module()

loss = nn.CrossEntropyLoss()
for imgs, targets in dataloader:
    result = loss(cifar_module(imgs), targets)
    print(result)

torch.optim

# `import torch` is sufficient (and idiomatic) for torch.optim.SGD below;
# the original `import torch.optim.sgd` pulled in a private submodule.
import torch
import torchvision.datasets
from torch import nn
from torch.nn import Sequential, Conv2d, MaxPool2d, Flatten, Linear
from torch.utils.data import DataLoader

# Prepare the data (download=False: ../dataset must already contain CIFAR10).
# NOTE(review): `datase` is a misspelling of "dataset", kept because it is
# used consistently in this section.
datase = torchvision.datasets.CIFAR10("../dataset", train=False,
                                      transform=torchvision.transforms.ToTensor(),
                                      download=False)
dataloader = DataLoader(datase, batch_size=64)

# Build the network: three 5x5 conv + 2x2 max-pool stages, then two
# fully-connected layers producing 10 class scores.
class CIFAR10_Module(nn.Module):
    """CIFAR10 demo CNN: [N,3,32,32] -> [N,10]."""

    def __init__(self):
        super(CIFAR10_Module, self).__init__()
        layers = [
            Conv2d(3, 32, 5, padding=2),
            MaxPool2d(2),
            Conv2d(32, 32, 5, padding=2),
            MaxPool2d(2),
            Conv2d(32, 64, 5, padding=2),
            MaxPool2d(2),
            Flatten(),           # [N, 64, 4, 4] -> [N, 1024]
            Linear(1024, 64),
            Linear(64, 10),
        ]
        self.module1 = Sequential(*layers)

    def forward(self, x):
        x = self.module1(x)
        return x
# Instantiate the network, the loss, and the optimizer (plain SGD, lr=0.01).
cifar_module = CIFAR10_Module()
loss = nn.CrossEntropyLoss()
optim = torch.optim.SGD(cifar_module.parameters(), lr=0.01)
for epoch in range(20):
    running_loss = 0.0
    for data in dataloader:
        imgs, targets = data
        output = cifar_module(imgs)
        result_loss = loss(output, targets)
        optim.zero_grad()          # clear gradients left from the last step
        result_loss.backward()     # backpropagate
        optim.step()               # apply the parameter update
        # FIX: use .item() to accumulate a plain float. Summing the raw loss
        # tensor kept every batch's autograd graph alive, growing memory
        # over the epoch.
        running_loss = running_loss + result_loss.item()
    print(running_loss)

修改现有网络

import torchvision
from torch.nn import Linear

# Pretrained vs randomly-initialized VGG16 (pretrained=True downloads weights).
vgg16_pretrained = torchvision.models.vgg16(pretrained=True)
vgg16_untrained = torchvision.models.vgg16(pretrained=False)

print(vgg16_pretrained)

# Append a new layer to the classifier head.
vgg16_pretrained.classifier.add_module("mymodule", Linear(1000, 10))
print(vgg16_pretrained)

# Replace an existing classifier layer in place.
vgg16_untrained.classifier[6] = Linear(4096, 10)
print(vgg16_untrained)

网络模型的下载和保存

############################ Save ##############################
import torch
import torchvision.models

vgg16 = torchvision.models.vgg16(pretrained=False)

# Method 1: save the whole model (structure + parameters).
torch.save(vgg16, "../module/vgg16_method1.pth")

# Method 2: save only the parameters (the state_dict).
torch.save(vgg16.state_dict(), "../module/vgg16_method2.pth")

############################ Load ##############################
import torch
import torchvision.models

# Loading for save method 1: torch.load returns the fully pickled model.
vgg16_method1 = torch.load("../module/vgg16_method1.pth")
# print(vgg16_method1)

# Loading for save method 2: rebuild the architecture, then restore the
# parameters from the saved state_dict.
# FIX: the original last line was truncated/garbled ("pretrained=Falsh" with
# no closing paren and no load_state_dict call).
vgg16_method2 = torchvision.models.vgg16(pretrained=False)
vgg16_method2.load_state_dict(torch.load("../module/vgg16_method2.pth"))
# print(vgg16_method2)
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值