TensorBoard
from torch.utils.tensorboard import SummaryWriter
writer = SummaryWriter("logs")
for i in range(100):
    writer.add_scalar("y = x", i, i)
writer.close()
tensorboard --logdir=logs --port=6007
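add_scalar takes (tag, scalar_value, global_step), so the loop above draws the line y = x; after starting TensorBoard with the command above, the plot is served at http://localhost:6007. A second curve under a hypothetical tag, as a small variation:

from torch.utils.tensorboard import SummaryWriter

writer = SummaryWriter("logs")
for i in range(100):
    writer.add_scalar("y = 2x", 2 * i, i)   # tag, value, global step
writer.close()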
torchvision.transforms
- ToTensor
from torchvision import transforms

trans_totensor = transforms.ToTensor()    # converts a PIL image or ndarray to a CHW float tensor in [0, 1]
img_tensor = trans_totensor(img)          # img is a PIL image opened earlier, e.g. with Image.open(...)
writer.add_image("ToTensor", img_tensor)  # writer: the SummaryWriter from the TensorBoard section
- Normalize
print(img_tensor[0][0][0])
trans_norm = transforms.Normalize([6, 3, 2], [9, 3, 5])   # per-channel mean and std: output = (input - mean) / std
img_norm = trans_norm(img_tensor)
print(img_norm[0][0][0])          # see the check after this list
writer.add_image("Normalize", img_norm, 2)
- Resize
trans_resize = transforms.Resize((512, 512))
img_resize = trans_resize(img)             # still a PIL image
img_resize = trans_totensor(img_resize)    # convert to a tensor for TensorBoard
- Compose - Resize - 2
trans_resize_2 = transforms.Resize(512)    # a single int scales the shorter edge to 512, keeping the aspect ratio
trans_compose = transforms.Compose([trans_resize_2, trans_totensor])
img_resize_2 = trans_compose(img)
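A quick sanity check on the transforms above, reusing img_tensor, img_norm, and the resized tensors from this list (a sketch, assuming img has three channels):

print((img_tensor[0][0][0] - 6) / 9)   # matches img_norm[0][0][0]: Normalize computes (x - mean) / std per channel
print(img_resize.shape)                # torch.Size([3, 512, 512]): Resize((512, 512)) forces the exact size
print(img_resize_2.shape)              # shorter edge scaled to 512, aspect ratio preserved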
torchvision.datasets
import torchvision
from tensorboardX import SummaryWriter
dataset_transform = torchvision.transforms.Compose([
    torchvision.transforms.ToTensor()
])

train_set = torchvision.datasets.CIFAR10(root="./dataset", train=True, transform=dataset_transform, download=True)
test_set = torchvision.datasets.CIFAR10(root="./dataset", train=False, transform=dataset_transform, download=True)

writer = SummaryWriter("logs")
for i in range(10):
    img, target = test_set[i]
    writer.add_image("test_set", img, i)
writer.close()
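Each CIFAR10 sample is an (image, target) pair, where target indexes into the dataset's class-name list; a quick look using the test_set defined above:

img, target = test_set[0]
print(img.shape)                   # torch.Size([3, 32, 32]) after ToTensor
print(target, test_set.classes[target])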
DataLoader
from tensorboardX import SummaryWriter
from torch.utils.data import DataLoader
import torchvision
test_data = torchvision.datasets.CIFAR10(root="./dataset", train=False, transform=torchvision.transforms.ToTensor())
test_loader = DataLoader(dataset=test_data, batch_size=64, shuffle=True, num_workers=0, drop_last=True)

img, target = test_data[0]
print(img.shape)
print(target)

writer = SummaryWriter("dataloader")
for epoch in range(2):
    step = 0
    for data in test_loader:
        imgs, targets = data
        writer.add_images("Epoch:{}".format(epoch), imgs, step)
        step = step + 1
writer.close()
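With batch_size=64 and drop_last=True, the 10000 CIFAR-10 test images yield 10000 // 64 = 156 full batches and the final 16 images are dropped; shuffle=True reorders the data each epoch, which is why the two epochs show different image grids. A quick check against the test_loader above:

print(len(test_loader))            # 156
imgs, targets = next(iter(test_loader))
print(imgs.shape)                  # torch.Size([64, 3, 32, 32])
print(targets.shape)               # torch.Size([64])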
torch.nn.Module → Building a Neural Network
import torch
import torchvision
from tensorboardX import SummaryWriter
from torch import nn
from torch.nn import Conv2d
from torch.utils.data import DataLoader
dataset = torchvision.datasets.CIFAR10("../dataset", train=True, transform=torchvision.transforms.ToTensor(), download=False)
dataloader = DataLoader(dataset, batch_size=64)

class Module(nn.Module):
    def __init__(self):
        super(Module, self).__init__()
        self.conv1 = Conv2d(in_channels=3, out_channels=6, kernel_size=3, stride=1, padding=0)

    def forward(self, x):
        x = self.conv1(x)
        return x

writer = SummaryWriter("../logs")
mymodule = Module()

step = 0
for data in dataloader:
    imgs, targets = data
    output = mymodule(imgs)                           # [64, 3, 32, 32] -> [64, 6, 30, 30]
    output = torch.reshape(output, (-1, 3, 30, 30))   # fold the 6 channels into the batch dim so add_images can show RGB
    print("input:", imgs.shape)
    print("output:", output.shape)
    writer.add_images("input", imgs, step)
    writer.add_images("output", output, step)
    step = step + 1
writer.close()
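nn.Module only requires __init__ (which must call the parent constructor) and forward; calling the instance runs forward through __call__. A minimal toy sketch, independent of the CIFAR-10 code above:

import torch
from torch import nn

class Toy(nn.Module):
    def __init__(self):
        super().__init__()

    def forward(self, x):
        return x + 1            # forward runs automatically when the module is called

toy = Toy()
print(toy(torch.tensor(1.0)))   # tensor(2.)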
Pooling Layers
import torch
import torchvision
from tensorboardX import SummaryWriter
from torch import nn
from torch.nn import MaxPool2d
from torch.utils.data import DataLoader

dataset = torchvision.datasets.CIFAR10("../dataset", train=False, transform=torchvision.transforms.ToTensor(), download=False)
dataloader = DataLoader(dataset, batch_size=64)

class Module(nn.Module):
    def __init__(self):
        super(Module, self).__init__()
        self.maxpool1 = MaxPool2d(kernel_size=3, ceil_mode=True)   # ceil_mode=True keeps partially covered windows at the border

    def forward(self, input):
        output = self.maxpool1(input)
        return output

mymodule = Module()
writer = SummaryWriter("../logs")

step = 0
for data in dataloader:
    imgs, targets = data
    output = mymodule(imgs)
    print(imgs.shape)
    print(output.shape)
    writer.add_images("input", imgs, step)
    writer.add_images("output", output, step)
    step = step + 1
writer.close()
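ceil_mode controls what happens when the 3x3 window runs past the border: with ceil_mode=True a partially covered window still produces an output, with ceil_mode=False it is dropped. A standalone 5x5 example with made-up values (stride defaults to the kernel size):

import torch
from torch.nn import MaxPool2d

x = torch.tensor([[1, 2, 0, 3, 1],
                  [0, 1, 2, 3, 1],
                  [1, 2, 1, 0, 0],
                  [5, 2, 3, 1, 1],
                  [2, 1, 0, 1, 1]], dtype=torch.float32).reshape(1, 1, 5, 5)

print(MaxPool2d(kernel_size=3, ceil_mode=True)(x))   # 2x2 output: [[2, 3], [5, 1]]
print(MaxPool2d(kernel_size=3, ceil_mode=False)(x))  # 1x1 output: [[2]]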
Convolution Layers
import torch
import torchvision
from tensorboardX import SummaryWriter
from torch import nn
from torch.nn import Conv2d
from torch.utils.data import DataLoader
dataset = torchvision.datasets.CIFAR10("../dataset", train=False, transform=torchvision.transforms.ToTensor(), download=False)
dataloader = DataLoader(dataset, batch_size=64)

class Module(nn.Module):
    def __init__(self):
        super(Module, self).__init__()
        self.conv1 = Conv2d(in_channels=3, out_channels=6, kernel_size=3, stride=1, padding=0)

    def forward(self, x):
        x = self.conv1(x)
        return x

writer = SummaryWriter("../logs")
mymodule = Module()

step = 0
for data in dataloader:
    imgs, targets = data
    output = mymodule(imgs)                           # [64, 3, 32, 32] -> [64, 6, 30, 30]
    output = torch.reshape(output, (-1, 3, 30, 30))   # fold the 6 channels into the batch dim for add_images
    print("input:", imgs.shape)
    print("output:", output.shape)
    writer.add_images("input", imgs, step)
    writer.add_images("output", output, step)
    step = step + 1
writer.close()
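The output spatial size follows H_out = (H_in + 2*padding - kernel_size) // stride + 1 = (32 + 0 - 3) // 1 + 1 = 30; with 6 output channels the result cannot be logged as an RGB image directly, which is the only reason for the reshape. A standalone shape check:

import torch
from torch.nn import Conv2d

conv = Conv2d(in_channels=3, out_channels=6, kernel_size=3, stride=1, padding=0)
x = torch.rand(64, 3, 32, 32)                     # one CIFAR-10-sized batch
y = conv(x)
print(y.shape)                                    # torch.Size([64, 6, 30, 30])
print(torch.reshape(y, (-1, 3, 30, 30)).shape)    # torch.Size([128, 3, 30, 30]), a visualization trick only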
Linear Layers
import torch
import torchvision
from torch import nn
from torch.nn import Linear
from torch.utils.data import DataLoader

dataset = torchvision.datasets.CIFAR10("../dataset", train=False, transform=torchvision.transforms.ToTensor(), download=False)
dataloader = DataLoader(dataset, batch_size=64, drop_last=True)

class Module(nn.Module):
    def __init__(self):
        super(Module, self).__init__()
        self.linear1 = Linear(in_features=196608, out_features=10)   # 196608 = 64 * 3 * 32 * 32, a fully flattened batch

    def forward(self, input):
        output = self.linear1(input)
        return output

mymodule = Module()

for data in dataloader:
    imgs, targets = data
    print("input", imgs.shape)     # torch.Size([64, 3, 32, 32])
    input = torch.flatten(imgs)    # flattens everything, including the batch dimension
    print("input", input.shape)    # torch.Size([196608])
    output = mymodule(input)
    print(output.shape)            # torch.Size([10])
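torch.flatten(imgs) folds the batch dimension in as well, which only works here because drop_last=True guarantees every batch holds exactly 64 images; the more common pattern keeps the batch dimension with start_dim=1 and pairs it with Linear(3072, 10). A standalone comparison:

import torch

imgs = torch.rand(64, 3, 32, 32)
print(torch.flatten(imgs).shape)                  # torch.Size([196608]) = 64 * 3 * 32 * 32
print(torch.flatten(imgs, start_dim=1).shape)     # torch.Size([64, 3072]), batch dimension preserved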
Loss Functions
import torchvision
from torch import nn
from torch.nn import Sequential, Conv2d, MaxPool2d, Flatten, Linear
from torch.utils.data import DataLoader

dataset = torchvision.datasets.CIFAR10("../dataset", train=False, transform=torchvision.transforms.ToTensor(), download=False)
dataloader = DataLoader(dataset, batch_size=64)

class CIFAR10_Module(nn.Module):
    def __init__(self):
        super(CIFAR10_Module, self).__init__()
        self.module1 = Sequential(
            Conv2d(3, 32, 5, padding=2),
            MaxPool2d(2),
            Conv2d(32, 32, 5, padding=2),
            MaxPool2d(2),
            Conv2d(32, 64, 5, padding=2),
            MaxPool2d(2),
            Flatten(),
            Linear(1024, 64),
            Linear(64, 10)
        )

    def forward(self, x):
        x = self.module1(x)
        return x

cifar_module = CIFAR10_Module()
loss = nn.CrossEntropyLoss()

for data in dataloader:
    imgs, targets = data
    output = cifar_module(imgs)       # raw logits, shape [64, 10]
    result = loss(output, targets)    # CrossEntropyLoss takes logits and class indices
    print(result)
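nn.CrossEntropyLoss expects raw logits of shape [N, C] and class indices of shape [N]; it applies log-softmax and negative log-likelihood internally, so the network's last layer stays linear. A standalone toy example with made-up numbers:

import torch
from torch import nn

loss = nn.CrossEntropyLoss()
logits = torch.tensor([[0.1, 0.2, 0.3]])   # raw scores for 3 classes, batch of 1
target = torch.tensor([1])                 # the correct class index
print(loss(logits, target))                # ≈ 1.1019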
torch.optim
import torch
import torchvision
from torch import nn
from torch.nn import Sequential, Conv2d, MaxPool2d, Flatten, Linear
from torch.utils.data import DataLoader

dataset = torchvision.datasets.CIFAR10("../dataset", train=False, transform=torchvision.transforms.ToTensor(), download=False)
dataloader = DataLoader(dataset, batch_size=64)

class CIFAR10_Module(nn.Module):
    def __init__(self):
        super(CIFAR10_Module, self).__init__()
        self.module1 = Sequential(
            Conv2d(3, 32, 5, padding=2),
            MaxPool2d(2),
            Conv2d(32, 32, 5, padding=2),
            MaxPool2d(2),
            Conv2d(32, 64, 5, padding=2),
            MaxPool2d(2),
            Flatten(),
            Linear(1024, 64),
            Linear(64, 10)
        )

    def forward(self, x):
        x = self.module1(x)
        return x

cifar_module = CIFAR10_Module()
loss = nn.CrossEntropyLoss()
optim = torch.optim.SGD(cifar_module.parameters(), lr=0.01)

for epoch in range(20):
    running_loss = 0.0
    for data in dataloader:
        imgs, targets = data
        output = cifar_module(imgs)
        result_loss = loss(output, targets)
        optim.zero_grad()                  # clear gradients from the previous batch
        result_loss.backward()             # compute gradients
        optim.step()                       # update the parameters
        running_loss = running_loss + result_loss.item()   # .item() avoids keeping each batch's graph alive
    print(running_loss)
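The three calls per batch (zero_grad, backward, step) form the whole update cycle. A toy sketch on a single parameter with a made-up loss, to show what one SGD step does:

import torch

w = torch.tensor([1.0], requires_grad=True)
optim = torch.optim.SGD([w], lr=0.1)

loss = (2 * w - 1) ** 2        # toy loss; its gradient at w = 1 is 2 * (2*1 - 1) * 2 = 4
optim.zero_grad()              # clear any gradient left from a previous step
loss.backward()                # populates w.grad
optim.step()                   # w <- w - lr * w.grad = 1 - 0.1 * 4
print(w)                       # tensor([0.6000], requires_grad=True)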
Modifying an Existing Network
import torchvision
from torch.nn import Linear
vgg16_true = torchvision.models.vgg16(pretrained=True)     # downloads ImageNet-pretrained weights
vgg16_false = torchvision.models.vgg16(pretrained=False)   # randomly initialized weights
print(vgg16_true)

# Append an extra layer after the existing 1000-way output
vgg16_true.classifier.add_module("mymodule", Linear(1000, 10))
print(vgg16_true)

# Replace the existing 1000-way output layer in place
vgg16_false.classifier[6] = Linear(4096, 10)
print(vgg16_false)
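The two edits give different classifier tails; printing just the classifier makes the difference visible (same objects as above):

print(vgg16_true.classifier)    # ends with (6): Linear(4096, 1000) followed by (mymodule): Linear(1000, 10)
print(vgg16_false.classifier)   # ends with (6): Linear(4096, 10)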
Saving and Loading Network Models
import torch
import torchvision.models
vgg16 = torchvision.models.vgg16(pretrained=False)
# Method 1: save the whole model (structure + parameters)
torch.save(vgg16, "../module/vgg16_method1.pth")
# Method 2: save only the parameters as a state_dict (recommended)
torch.save(vgg16.state_dict(), "../module/vgg16_method2.pth")
import torch
import torchvision.models
# Method 1: load the whole model back
vgg16_method1 = torch.load("../module/vgg16_method1.pth")
# Method 2: rebuild the architecture, then load the saved state_dict into it
vgg16_method2 = torchvision.models.vgg16(pretrained=False)
vgg16_method2.load_state_dict(torch.load("../module/vgg16_method2.pth"))
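One caveat: method 1 pickles the whole object, so loading it later requires the model's class definition to be importable in the loading script, while method 2 only stores tensors and is the generally recommended approach. A quick look at what method 2 actually wrote (assuming the path above exists):

import torch

state_dict = torch.load("../module/vgg16_method2.pth")
print(type(state_dict))             # an OrderedDict mapping parameter names to tensors
print(list(state_dict.keys())[:3])  # e.g. 'features.0.weight', 'features.0.bias', ...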