import torch
import torch.nn as nn
x = torch.randn(6, 512, 1, 1)
print(x, x.shape)
x_ = x.view(6, 512)
m_ = nn.Linear(512, 5)
m__ = nn.Linear(512, 5)
y_ = m_(x_)
y__ = m__(x_)
print(m_.weight, m_.weight.shape)
print(m_.bias, m_.bias.shape)
print(y_, y_.shape)
# With Linear(512, 5): the input tensor has shape (6, 512), the output has shape (6, 5), the weight has shape (5, 512), and the bias has shape (5,)
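# Sanity checks (added sketch, not in the original post): nn.Linear stores its weight
# as (out_features, in_features), so Linear(512, 5) has a (5, 512) weight and a (5,) bias.
assert x_.shape == (6, 512) and y_.shape == (6, 5)
assert m_.weight.shape == (5, 512) and m_.bias.shape == (5,)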
print(torch.sum(x_[0]*m_.weight[0])+m_.bias[0], torch.sum(x_[0]*m_.weight[1])+m_.bias[1]) # verify the first two elements of the output tensor
print("****"*25)
print(m__.weight, m__.weight.shape)
print(m__.bias, m__.bias.shape)
print(y__, y__.shape)
print(torch.sum(x_[0]*m__.weight[0])+m__.bias[0], torch.sum(x_[0]*m__.weight[1])+m__.bias[1])
m = nn.Conv2d(512, 5, (1, 1), padding=0)
y = m(x)
print(m.weight, m.weight.shape)
print(m.bias, m.bias.shape)
print(y, y.shape)
print(torch.sum(x[0]*m.weight[0])+m.bias[0]) # verify the first element of the output tensor
print(torch.sum(x[0]*m.weight[1])+m.bias[1]) # verify the second element of the output tensor
print(torch.sum(x[5]*m.weight[4])+m.bias[4]) # verify the last element of the output tensor
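# Sanity checks (added sketch, not in the original post): nn.Conv2d stores its weight
# as (out_channels, in_channels, kH, kW), so Conv2d(512, 5, (1, 1)) has a (5, 512, 1, 1)
# weight and a (5,) bias.
assert m.weight.shape == (5, 512, 1, 1) and m.bias.shape == (5,)
assert y.shape == (6, 5, 1, 1)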
The output is as follows:
tensor([[[[-0.0227]],
         [[-0.3054]],
         [[ 0.5264]],
         ...,
         [[ 0.5306]],
         [[ 0.0769]],
         [[-0.3278]]],

        [[[ 0.0224]],
         ... (printout truncated in the original) ...

This post explores the use of linear layers (Linear) and convolutional layers (Conv2d) in PyTorch. The example code shows how both process an input tensor of the same shape yet produce different results, which comes down to their independently initialized weights and biases. Although the two compute the same thing in this specific setting (kernel_size=(1, 1), padding=0), the roles they play and the effects they have in practice differ.
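To make the equivalence concrete, here is a minimal sketch (not from the original post; the names linear and conv are illustrative) that copies a Linear layer's parameters into a 1×1 Conv2d and checks that the two produce the same output on the same data:

import torch
import torch.nn as nn

x = torch.randn(6, 512, 1, 1)              # (N, C, H, W) with H = W = 1
linear = nn.Linear(512, 5)
conv = nn.Conv2d(512, 5, kernel_size=1)

# Share parameters: the (5, 512) Linear weight reshapes to a (5, 512, 1, 1) Conv2d weight.
with torch.no_grad():
    conv.weight.copy_(linear.weight.view(5, 512, 1, 1))
    conv.bias.copy_(linear.bias)

y_linear = linear(x.view(6, 512))          # shape (6, 5)
y_conv = conv(x).view(6, 5)                # shape (6, 5, 1, 1) flattened to (6, 5)
print(torch.allclose(y_linear, y_conv, atol=1e-6))  # True: the computations match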