PyTorch Course Notes (Chapter 3)

import torch
#torch.where

a = torch.rand(4, 4)
b = torch.rand(4, 4)

print(a)
print(b)
tensor([[0.4295, 0.4639, 0.1673, 0.8253],
        [0.6463, 0.0885, 0.6070, 0.9644],
        [0.3394, 0.3260, 0.6519, 0.7562],
        [0.7720, 0.7656, 0.3653, 0.3863]])
tensor([[0.6792, 0.0703, 0.4598, 0.5104],
        [0.0588, 0.2977, 0.2643, 0.4308],
        [0.7312, 0.1989, 0.3239, 0.7443],
        [0.4825, 0.8893, 0.9637, 0.6021]])
out = torch.where(a > 0.5, a, b)

print(out)

tensor([[0.6792, 0.0703, 0.4598, 0.8253],
        [0.6463, 0.2977, 0.6070, 0.9644],
        [0.7312, 0.1989, 0.6519, 0.7562],
        [0.7720, 0.7656, 0.9637, 0.6021]])
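As a small aside, the method form Tensor.where is equivalent to the call above (a minimal sketch, reusing the same a, b and out):

check = a.where(a > 0.5, b)     # same as torch.where(a > 0.5, a, b)
print(torch.equal(check, out))  # True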
#torch.index_select

print("torch.index_select")
a = torch.rand(4, 4)
print(a)
out = torch.index_select(a, dim=0,
                   index=torch.tensor([0, 3, 2]))

print(out, out.shape)
torch.index_select
tensor([[0.7705, 0.5761, 0.7269, 0.7728],
        [0.7756, 0.3060, 0.3483, 0.7261],
        [0.1148, 0.0427, 0.9248, 0.7385],
        [0.2648, 0.1425, 0.3678, 0.2053]])
tensor([[0.7705, 0.5761, 0.7269, 0.7728],
        [0.2648, 0.1425, 0.3678, 0.2053],
        [0.1148, 0.0427, 0.9248, 0.7385]]) torch.Size([3, 4])
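With dim=1, index_select picks columns instead of rows. A minimal sketch, reusing the a above:

cols = torch.index_select(a, dim=1, index=torch.tensor([0, 2]))
print(cols.shape)  # torch.Size([4, 2]) -- columns 0 and 2 of every row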
#torch.gather

print("torch.gather")
a = torch.linspace(1, 16, 16).view(4, 4)

print(a)

out = torch.gather(a, dim=0,
             index=torch.tensor([[0, 1, 1, 1],
                                 [0, 1, 2, 2],
                                 [0, 1, 3, 3]]))
print(out)
print(out.shape)
torch.gather
tensor([[ 1.,  2.,  3.,  4.],
        [ 5.,  6.,  7.,  8.],
        [ 9., 10., 11., 12.],
        [13., 14., 15., 16.]])
tensor([[ 1.,  6.,  7.,  8.],
        [ 1.,  6., 11., 12.],
        [ 1.,  6., 15., 16.]])
torch.Size([3, 4])
#dim=0, out[i, j, k] = input[index[i, j, k], j, k]
#dim=1, out[i, j, k] = input[i, index[i, j, k], k]
#dim=2, out[i, j, k] = input[i, j, index[i, j, k]]
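For completeness, a dim=1 sketch of the same formula (out[i, j] = input[i, index[i, j]]), reusing the 4x4 a above:

idx = torch.tensor([[0, 3], [2, 1], [1, 0], [3, 2]])
print(torch.gather(a, dim=1, index=idx))
# tensor([[ 1.,  4.],
#         [ 7.,  6.],
#         [10.,  9.],
#         [16., 15.]])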
#torch.masked_select

print("torch.masked_select")
a = torch.linspace(1, 16, 16).view(4, 4)
mask = torch.gt(a, 8)
print(a)
print(mask)
out = torch.masked_select(a, mask)
print(out)
torch.masked_select
tensor([[ 1.,  2.,  3.,  4.],
        [ 5.,  6.,  7.,  8.],
        [ 9., 10., 11., 12.],
        [13., 14., 15., 16.]])
tensor([[False, False, False, False],
        [False, False, False, False],
        [ True,  True,  True,  True],
        [ True,  True,  True,  True]])
tensor([ 9., 10., 11., 12., 13., 14., 15., 16.])
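masked_select always returns a flattened 1-D tensor; plain boolean indexing gives the same result. A quick check with the same a, mask and out:

print(torch.equal(a[mask], out))  # True -- a[mask] is equivalent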
#torch.take

print("torch.take")
a = torch.linspace(1, 16, 16).view(4, 4)

b = torch.take(a, index=torch.tensor([0, 15, 13, 10]))

print(b)

torch.take
tensor([ 1., 16., 14., 11.])
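take treats the input as if it were flattened, so it is equivalent to indexing the 1-D view. A quick check with the same a and b:

flat = a.reshape(-1)
print(torch.equal(flat[torch.tensor([0, 15, 13, 10])], b))  # True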
#torch.nonzero
print("torch.take")
a = torch.tensor([[0, 1, 2, 0], [2, 3, 0, 1]])
out = torch.nonzero(a)
print(out)
# sparse representation: nonzero returns the coordinates of the non-zero entries
torch.nonzero
tensor([[0, 1],
        [0, 2],
        [1, 0],
        [1, 1],
        [1, 3]])


..\torch\csrc\utils\python_arg_parser.cpp:756: UserWarning: This overload of nonzero is deprecated:
	nonzero(Tensor input, *, Tensor out)
Consider using one of the following signatures instead:
	nonzero(Tensor input, *, bool as_tuple)
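Following the warning, the as_tuple form returns one index tensor per dimension, which can feed straight back into fancy indexing. A minimal sketch with the same a:

rows, cols = torch.nonzero(a, as_tuple=True)
print(rows)           # tensor([0, 0, 1, 1, 1])
print(cols)           # tensor([1, 2, 0, 1, 3])
print(a[rows, cols])  # tensor([1, 2, 2, 3, 1]) -- the non-zero values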

a = torch.zeros((2, 4))
b = torch.ones((2, 4))

out = torch.cat((a, b), dim=0)
print(out)
tensor([[0., 0., 0., 0.],
        [0., 0., 0., 0.],
        [1., 1., 1., 1.],
        [1., 1., 1., 1.]])
out = torch.cat((a, b), dim=1)
print(out)


tensor([[0., 0., 0., 0., 1., 1., 1., 1.],
        [0., 0., 0., 0., 1., 1., 1., 1.]])
#torch.stack

print("torch.stack")
a = torch.linspace(1, 6, 6).view(2, 3)
b = torch.linspace(7, 12, 6).view(2, 3)
print(a,'\n', b)

torch.stack
tensor([[1., 2., 3.],
        [4., 5., 6.]]) 
 tensor([[ 7.,  8.,  9.],
        [10., 11., 12.]])
out = torch.stack((a, b), dim=0)
print(out)
print(out.shape)

out = torch.stack((a, b), dim=1)
print(out)
print(out.shape)

out = torch.stack((a, b), dim=2)
print(out)
print(out.shape)

print(out[:, :, 0])
print(out[:, :, 1])
tensor([[[ 1.,  2.,  3.],
         [ 4.,  5.,  6.]],

        [[ 7.,  8.,  9.],
         [10., 11., 12.]]])
torch.Size([2, 2, 3])
tensor([[[ 1.,  2.,  3.],
         [ 7.,  8.,  9.]],

        [[ 4.,  5.,  6.],
         [10., 11., 12.]]])
torch.Size([2, 2, 3])
tensor([[[ 1.,  7.],
         [ 2.,  8.],
         [ 3.,  9.]],

        [[ 4., 10.],
         [ 5., 11.],
         [ 6., 12.]]])
torch.Size([2, 3, 2])
tensor([[1., 2., 3.],
        [4., 5., 6.]])
tensor([[ 7.,  8.,  9.],
        [10., 11., 12.]])
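Unlike cat, stack creates a new dimension. A small equivalence check, reusing the a and b above:

s = torch.stack((a, b), dim=0)
c = torch.cat((a.unsqueeze(0), b.unsqueeze(0)), dim=0)
print(torch.equal(s, c))  # True -- stack = unsqueeze + cat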

a = torch.rand(2, 3)
print(a)
out = torch.reshape(a, (3, 2))
print(out)
print(torch.t(out))
print(torch.transpose(out, 0, 1))
tensor([[0.0207, 0.3706, 0.8209],
        [0.3346, 0.3999, 0.5367]])
tensor([[0.0207, 0.3706],
        [0.8209, 0.3346],
        [0.3999, 0.5367]])
tensor([[0.0207, 0.8209, 0.3999],
        [0.3706, 0.3346, 0.5367]])
tensor([[0.0207, 0.8209, 0.3999],
        [0.3706, 0.3346, 0.5367]])

a = torch.rand(1, 2, 3)
out = torch.transpose(a, 0, 1)
print(out)
print(out.shape)

tensor([[[0.0473, 0.6523, 0.3568]],

        [[0.3176, 0.1688, 0.1190]]])
torch.Size([2, 1, 3])
out = torch.squeeze(a)
print(out)
print(out.shape)
out = torch.unsqueeze(a, -1)
print(out.shape)
tensor([[0.0473, 0.6523, 0.3568],
        [0.3176, 0.1688, 0.1190]])
torch.Size([2, 3])
torch.Size([1, 2, 3, 1])
out = torch.unbind(a, dim=2)
print(out)
print(a)
print(a.shape)
print(torch.flip(a, dims=[2, 1]))
print(a)
print(a.shape)

(tensor([[0.0473, 0.3176]]), tensor([[0.6523, 0.1688]]), tensor([[0.3568, 0.1190]]))
tensor([[[0.0473, 0.6523, 0.3568],
         [0.3176, 0.1688, 0.1190]]])
torch.Size([1, 2, 3])
tensor([[[0.1190, 0.1688, 0.3176],
         [0.3568, 0.6523, 0.0473]]])
tensor([[[0.0473, 0.6523, 0.3568],
         [0.3176, 0.1688, 0.1190]]])
torch.Size([1, 2, 3])
print(a)
out = torch.rot90(a, -1, dims=[0, 2])
print(out)
print(out.shape)
tensor([[[0.0473, 0.6523, 0.3568],
         [0.3176, 0.1688, 0.1190]]])
tensor([[[0.0473],
         [0.3176]],

        [[0.6523],
         [0.1688]],

        [[0.3568],
         [0.1190]]])
torch.Size([3, 2, 1])
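The sign of k sets the rotation direction in the dims plane (positive rotates counter-clockwise, from the first listed dim towards the second). A 2-D sketch makes the direction easier to see:

m = torch.tensor([[1, 2], [3, 4]])
print(torch.rot90(m, 1, dims=[0, 1]))   # tensor([[2, 4], [1, 3]]) -- counter-clockwise
print(torch.rot90(m, -1, dims=[0, 1]))  # tensor([[3, 1], [4, 2]]) -- clockwise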

import torch
import numpy as np
import cv2  #open cv
data = cv2.imread(r"C:\test.png")
print(data)  # the loaded image is a numpy array (H x W x C, BGR order)
cv2.imshow('test', data)  # show data in a window titled 'test'
cv2.waitKey(0)
[[[204 204 204]
  [204 204 204]
  [204 204 204]
  ...
  [204 204 204]
  [204 204 204]
  [204 204 204]]

 [[204 204 204]
  [204 204 204]
  [204 204 204]
  ...
  [204 204 204]
  [204 204 204]
  [204 204 204]]

 [[204 204 204]
  [204 204 204]
  [204 204 204]
  ...
  [204 204 204]
  [204 204 204]
  [204 204 204]]

 ...

 [[204 204 204]
  [204 204 204]
  [204 204 204]
  ...
  [204 204 204]
  [204 204 204]
  [204 204 204]]

 [[204 204 204]
  [204 204 204]
  [204 204 204]
  ...
  [204 204 204]
  [204 204 204]
  [204 204 204]]

 [[204 204 204]
  [204 204 204]
  [204 204 204]
  ...
  [204 204 204]
  [204 204 204]
  [204 204 204]]]

-1
# a = np.zeros([2, 2])
out = torch.from_numpy(data)
#print(out)

#out = out.to(torch.device("cuda"))
#print(out.is_cuda)
out = torch.flip(out, dims=[0])  # flip along dim 0 (image height): vertical mirror
#out = out.to(torch.device("cpu"))
#print(out.is_cuda)

data = out.numpy()
cv2.imshow("test2", data)
cv2.waitKey(0) 
-1
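OpenCV stores images in BGR channel order, so flipping the last (channel) dimension of the H x W x C tensor converts to RGB. A minimal sketch, reusing the data array above:

rgb = torch.flip(torch.from_numpy(data), dims=[2]).numpy()  # BGR -> RGB
cv2.imshow("rgb", rgb)
cv2.waitKey(0)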

import torch
from torch.autograd import Variable

# part 1
#x = Variable(torch.ones(2, 2), requires_grad=True)
x = torch.ones(2, 2, requires_grad=True)

x.register_hook(lambda grad: grad * 2)  # hook: doubles the gradient flowing back to x

y = x + 2
z = y * y * 3
nn = torch.rand(2, 2)
#nn = torch.ones(2, 2)
print(nn)

z.backward(torch.ones(2,2))
#z.backward(gradient=nn)

print(x.grad)
print(y.grad)
print(x.grad_fn)
print(y.grad_fn)
print(z.grad_fn)
# z = torch.sum(z)
tensor([[0.9284, 0.0764],
        [0.0969, 0.5649]])
tensor([[36., 36.],
        [36., 36.]])
None
None
<AddBackward0 object at 0x0000012E7F78F748>
<MulBackward0 object at 0x0000012E7E816448>
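y.grad prints None because y is a non-leaf tensor and autograd frees non-leaf gradients after backward. retain_grad() keeps them; a minimal sketch (no hook registered this time):

x = torch.ones(2, 2, requires_grad=True)
y = x + 2
y.retain_grad()               # keep the non-leaf gradient
z = y * y * 3
z.backward(torch.ones(2, 2))
print(y.grad)                 # tensor([[18., 18.], [18., 18.]]) -- dz/dy = 6*y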
x = torch.ones(2, 2, requires_grad=True)
y = x + 2
z = y * y * 3
#nn = torch.rand(2, 2)
nn = torch.ones(2, 2)

print(nn)
#z.backward(gradient=nn, retain_graph=True)
torch.autograd.backward(z,grad_tensors=nn,
                        retain_graph=True)
#print(torch.autograd.grad(z, [x, y, z],
#                    grad_outputs=nn))
print(x.grad)
print(y.grad)
print(x.grad_fn)
print(y.grad_fn)
print(z.grad_fn)
z.backward(gradient=nn, retain_graph=True)
print(x.grad)
z.backward(gradient=nn, retain_graph=True)
print(x.grad)   # gradients accumulate in x.grad across repeated backward calls
tensor([[1., 1.],
        [1., 1.]])
tensor([[18., 18.],
        [18., 18.]])
None
None
<AddBackward0 object at 0x0000012E7E1D2AC8>
<MulBackward0 object at 0x0000012E7EA9D0C8>
tensor([[36., 36.],
        [36., 36.]])
tensor([[54., 54.],
        [54., 54.]])
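To restart the accumulation, zero the .grad buffer in place before the next backward call:

x.grad.zero_()  # reset the accumulated gradient
z.backward(gradient=nn, retain_graph=True)
print(x.grad)   # back to tensor([[18., 18.], [18., 18.]])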

class line(torch.autograd.Function):
    @staticmethod  # autograd Functions use static methods
    def forward(ctx, w, x, b):  # ctx is a context object for stashing tensors
        # y = w * x + b
        ctx.save_for_backward(w, x, b)
        return w * x + b

    @staticmethod
    def backward(ctx, grad_out):
        w, x, b = ctx.saved_tensors

        grad_w = grad_out * x
        grad_x = grad_out * w
        grad_b = grad_out

        return grad_w, grad_x, grad_b
w = torch.rand(2, 2, requires_grad=True)
x = torch.rand(2, 2, requires_grad=True)
b = torch.rand(2, 2, requires_grad=True)

out = line.apply(w, x, b)
out.backward(torch.ones(2, 2))
print(w, '\n',x,'\n', b)
print(w.grad,'\n', x.grad, '\n',b.grad)

tensor([[0.5751, 0.0111],
        [0.9680, 0.5252]], requires_grad=True) 
 tensor([[0.6795, 0.1127],
        [0.7956, 0.8157]], requires_grad=True) 
 tensor([[0.0626, 0.7311],
        [0.6207, 0.9900]], requires_grad=True)
tensor([[0.6795, 0.1127],
        [0.7956, 0.8157]]) 
 tensor([[0.5751, 0.0111],
        [0.9680, 0.5252]]) 
 tensor([[1., 1.],
        [1., 1.]])
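A handy sanity check for a hand-written backward is torch.autograd.gradcheck, which compares the analytic gradients against numerical ones (it requires double precision). A minimal sketch for the line Function above:

from torch.autograd import gradcheck
w = torch.rand(2, 2, dtype=torch.double, requires_grad=True)
x = torch.rand(2, 2, dtype=torch.double, requires_grad=True)
b = torch.rand(2, 2, dtype=torch.double, requires_grad=True)
print(gradcheck(line.apply, (w, x, b)))  # True if backward matches the numeric grads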

import visdom
import numpy as np
vis = visdom.Visdom()
vis.text('hello world!')
vis.image(np.ones((3, 10, 10)))
Setting up a new session...
'window_38fbf7e2988e0a'
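Note: visdom needs its server running first (python -m visdom.server); the plots then appear at http://localhost:8097 by default.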

from tensorboardX import SummaryWriter

writer = SummaryWriter("log")
# writer is the event writer; "log" is the output directory (a log folder is created)
for i in range(100):
    writer.add_scalar("a", i, global_step=i)
    #i对应a的值,step为步长,标值第几次写入值
    writer.add_scalar("b", i ** 2, global_step=i)
writer.close()
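To view the two scalar curves, run tensorboard --logdir log and open http://localhost:6006 in a browser.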