import torch

# torch.index_select
print("torch.index_select")
a = torch.rand(4, 4)
print(a)
out = torch.index_select(a, dim=0,
                         index=torch.tensor([0, 3, 2]))
print(out, out.shape)
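index_select picks whole slices along dim (here rows 0, 3, 2, in that order), so the result is 3x4. It behaves like advanced indexing along that dimension; a minimal check:

import torch

a = torch.rand(4, 4)
idx = torch.tensor([0, 3, 2])
# index_select along dim 0 is the same as row indexing with a LongTensor
assert torch.equal(torch.index_select(a, dim=0, index=idx), a[idx])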
# torch.gather
print("torch.gather")
a = torch.linspace(1, 16, 16).view(4, 4)
print(a)
out = torch.gather(a, dim=0,
                   index=torch.tensor([[0, 1, 1, 1], [0, 1, 2, 2], [0, 1, 3, 3]]))
print(out)
print(out.shape)
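For dim=0, gather follows out[i][j] = a[index[i][j]][j]: each entry of index picks the row, while the column is kept. With a = 1..16 reshaped to 4x4, the first index row [0, 1, 1, 1] therefore yields [a[0][0], a[1][1], a[1][2], a[1][3]] = [1., 6., 7., 8.]. A quick check of the rule:

import torch

a = torch.linspace(1, 16, 16).view(4, 4)
out = torch.gather(a, dim=0, index=torch.tensor([[0, 1, 1, 1]]))
print(out)  # tensor([[1., 6., 7., 8.]])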
# torch.take
print("torch.take")
a = torch.linspace(1, 16, 16).view(4, 4)
b = torch.take(a, index=torch.tensor([0, 15, 13, 10]))
print(b)
torch.take
tensor([ 1., 16., 14., 11.])
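torch.take always indexes the flattened tensor, so index 15 is the last of the 16 elements regardless of the 4x4 shape. The equivalent flattened indexing:

import torch

a = torch.linspace(1, 16, 16).view(4, 4)
idx = torch.tensor([0, 15, 13, 10])
# take treats the input as 1-D; both forms give [1., 16., 14., 11.]
assert torch.equal(torch.take(a, idx), a.reshape(-1)[idx])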
# torch.nonzero
print("torch.nonzero")
a = torch.tensor([[0,1,2,0],[2,3,0,1]])
out = torch.nonzero(a)
print(out)  # sparse representation: indices of the non-zero elements
torch.nonzero
tensor([[0, 1],
[0, 2],
[1, 0],
[1, 1],
[1, 3]])
..\torch\csrc\utils\python_arg_parser.cpp:756: UserWarning: This overload of nonzero is deprecated:
nonzero(Tensor input, *, Tensor out)
Consider using one of the following signatures instead:
nonzero(Tensor input, *, bool as_tuple)
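As the warning suggests, the as_tuple form returns one index tensor per dimension, which can be used directly for indexing. A short sketch:

import torch

a = torch.tensor([[0, 1, 2, 0], [2, 3, 0, 1]])
rows, cols = torch.nonzero(a, as_tuple=True)
print(a[rows, cols])  # the non-zero values: tensor([1, 2, 2, 3, 1])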
a = torch.zeros((2,4))
b = torch.ones((2,4))
out = torch.cat((a, b), dim=0)
print(out)
out = torch.stack((a, b), dim=0)
print(out)
print(out.shape)
out = torch.stack((a, b), dim=1)
print(out)
print(out.shape)
out = torch.stack((a, b), dim=2)
print(out)
print(out.shape)
print(out[:, :, 0])
print(out[:, :, 1])
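cat joins tensors along an existing dimension (the sizes there add up), while stack inserts a new dimension (the inputs must match exactly). A shape comparison with the same a and b:

import torch

a = torch.zeros((2, 4))
b = torch.ones((2, 4))
print(torch.cat((a, b), dim=0).shape)    # torch.Size([4, 4])
print(torch.stack((a, b), dim=0).shape)  # torch.Size([2, 2, 4])
print(torch.stack((a, b), dim=2).shape)  # torch.Size([2, 4, 2])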
# a = np.zeros([2, 2])
import cv2

# data: a NumPy image prepared earlier, e.g. data = cv2.imread("test.jpg") (hypothetical)
out = torch.from_numpy(data)
# print(out)
# out = out.to(torch.device("cuda"))
# print(out.is_cuda)
out = torch.flip(out, dims=[0])  # flip the image vertically (along dim 0)
# out = out.to(torch.device("cpu"))
# print(out.is_cuda)
data = out.numpy()
cv2.imshow("test2", data)
cv2.waitKey(0)
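Note that torch.from_numpy shares memory with the source array (no copy is made), while torch.flip returns a new tensor, which is why .numpy() is called again before display. A minimal sketch of the sharing:

import numpy as np
import torch

arr = np.zeros((2, 2))
t = torch.from_numpy(arr)
t[0, 0] = 1.0     # in-place edit on the tensor...
print(arr[0, 0])  # ...shows up in the numpy array: 1.0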
import torch
from torch.autograd import Variable
# part 1
# x = Variable(torch.ones(2, 2), requires_grad=True)  # old Variable API; requires_grad on the tensor suffices now
x = torch.ones(2, 2, requires_grad=True)
x.register_hook(lambda grad: grad * 2)  # hook: doubles the gradient flowing back into x
y = x + 2
z = y * y * 3
nn = torch.rand(2, 2)
# nn = torch.ones(2, 2)
print(nn)
z.backward(torch.ones(2, 2))
# z.backward(gradient=nn)
print(x.grad)
print(y.grad)
print(x.grad_fn)
print(y.grad_fn)
print(z.grad_fn)
# z = torch.sum(z)
tensor([[0.9284, 0.0764],
[0.0969, 0.5649]])
tensor([[36., 36.],
[36., 36.]])
None
None
<AddBackward0 object at 0x0000012E7F78F748>
<MulBackward0 object at 0x0000012E7E816448>
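The numbers check out: z = 3(x + 2)^2, so dz/dx = 6(x + 2) = 18 at x = 1, and the registered hook doubles it to 36. y.grad is None because y is a non-leaf tensor (gradients are only retained on leaves unless y.retain_grad() is called), and x.grad_fn is None because x is a leaf created by the user.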
x = torch.ones(2, 2, requires_grad=True)
y = x + 2
z = y * y * 3
# nn = torch.rand(2, 2)
nn = torch.ones(2, 2)
print(nn)
# z.backward(gradient=nn, retain_graph=True)
torch.autograd.backward(z, grad_tensors=nn,
                        retain_graph=True)
# print(torch.autograd.grad(z, [x, y, z], grad_outputs=nn))
print(x.grad)
print(y.grad)
print(x.grad_fn)
print(y.grad_fn)
print(z.grad_fn)
z.backward(gradient=nn, retain_graph=True)
print(x.grad)
z.backward(gradient=nn, retain_graph=True)
print(x.grad)  # backward() accumulates into .grad; torch.autograd.grad() does not
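Because backward() keeps adding into .grad, the two extra calls above grow x.grad to three backward passes' worth (54s). Continuing the same snippet, the accumulated gradient can be reset in place before the next call:

x.grad.zero_()  # clear the accumulated gradient
z.backward(gradient=nn, retain_graph=True)
print(x.grad)   # a single backward's worth again: all 18s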
class line(torch.autograd.Function):
    @staticmethod  # static method
    def forward(ctx, w, x, b):  # ctx: context object for passing data to backward
        # y = w*x + b
        ctx.save_for_backward(w, x, b)
        return w * x + b

    @staticmethod
    def backward(ctx, grad_out):
        w, x, b = ctx.saved_tensors
        grad_w = grad_out * x
        grad_x = grad_out * w
        grad_b = grad_out
        return grad_w, grad_x, grad_b
w = torch.rand(2,2, requires_grad=True)
x = torch.rand(2,2, requires_grad=True)
b = torch.rand(2,2, requires_grad=True)
out = line.apply(w, x, b)
out.backward(torch.ones(2, 2))
print(w, '\n', x, '\n', b)
print(w.grad, '\n', x.grad, '\n', b.grad)
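A custom Function's backward can be verified numerically with torch.autograd.gradcheck, which compares it against finite differences (it expects double-precision inputs). A sketch, reusing the line class above:

w64 = torch.rand(2, 2, dtype=torch.float64, requires_grad=True)
x64 = torch.rand(2, 2, dtype=torch.float64, requires_grad=True)
b64 = torch.rand(2, 2, dtype=torch.float64, requires_grad=True)
print(torch.autograd.gradcheck(line.apply, (w64, x64, b64)))  # True when backward matches the numeric gradient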
import visdom
import numpy as np
vis = visdom.Visdom()
vis.text('hello world!')
vis.image(np.ones((3,10,10)))
Setting up a new session...
'window_38fbf7e2988e0a'
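visdom can also plot; a minimal line-chart sketch on the same running server (the window name "square" is arbitrary):

x = np.arange(100)
vis.line(Y=np.square(x), X=x, win="square", opts={"title": "y = x^2"})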
from tensorboardX import SummaryWriter
writer = SummaryWriter("log")#writer是写入器; 'log'是路径(多了log文件夹)for i inrange(100):
writer.add_scalar("a", i, global_step=i)#i对应a的值,step为步长,标值第几次写入值
writer.add_scalar("b", i **2, global_step=i)
writer.close()
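The curves "a" and "b" can then be inspected by running tensorboard --logdir log and opening the printed URL in a browser.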