for child in module.children(): # 遍历模型的网络结构
# child为每一层网络的每一个部分
for k, p in child._parameters.items():
# k为每一层的参数名称,如weight,bias
# p为参数的具体数值
for name, module in self.layers.named_children():
# name 为每一层的名称 如:conv1,fc4
# module 为该层的具体网络结构,如:
# Sequential(
# (0): Conv2d(3, 96, kernel_size=(7, 7), stride=(2, 2))
# (1): ReLU()
# (2): LRN()
# (3): MaxPool2d(kernel_size=3, stride=2, padding=0, dilation=1, ceil_mode=False)
# )
for k, p in self.params.items():
# k 为每一层的参数名称 如:conv1_weight conv1_bias fc4_weight fc6_0_bias
# p 为相对应参数的具体数值
torch.cat([a, b], dim=0).shape == (6, 4)  # 其中 a.shape == b.shape == (3, 4),沿 dim=0 拼接则行数相加
torch.cat([a, b], dim=1).shape == (3, 8)  # 沿 dim=1 拼接则列数相加
torch.topk(x, k, dim=0 或 dim=1) 返回沿 dim 方向上前 k 大的元素及其对应索引,结构为 (values, indices),见下例
>>> x
tensor([[1.8801e-07, 3.0927e-41, 2.8026e-44],
[0.0000e+00, nan, 3.0927e-41],
[1.3733e-14, 4.7429e+30, 1.9431e-19],
[4.7429e+30, 5.0938e-14, 1.7661e-04]])
>>> ans = torch.topk(x,2,dim=0)
>>> ans
(tensor([[4.7429e+30, 4.7429e+30, 1.7661e-04],
[1.8801e-07, 5.0938e-14, 1.9431e-19]]), tensor([[3, 2, 3],
[0, 3, 2]]))
>>> ans[0]
tensor([[4.7429e+30, 4.7429e+30, 1.7661e-04],
[1.8801e-07, 5.0938e-14, 1.9431e-19]])
>>> ans[1]
tensor([[3, 2, 3],
[0, 3, 2]])