import torch
from torch import nn
import torch.nn.functional as F
# First build a fully connected submodule by subclassing nn.Module
class Linear(nn.Module):
    def __init__(self, in_dim, out_dim):
        super(Linear, self).__init__()  # call nn.Module's constructor
        # Use nn.Parameter to register the learnable parameters
        self.W = nn.Parameter(torch.randn(in_dim, out_dim))
        self.b = nn.Parameter(torch.randn(out_dim))

    # Implement the forward pass in forward()
    def forward(self, x):
        x = x.matmul(self.W)  # Tensor.matmul performs matrix multiplication
        y = x + self.b.expand_as(x)
        return y
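# A minimal sketch for comparison (an addition, not part of the original listing):
# PyTorch's built-in nn.Linear implements the same affine map, but stores its
# weight transposed, with shape (out_dim, in_dim), and applies y = x @ W.T + b.
# The layer sizes below are hypothetical, chosen only for illustration.
_builtin_fc = nn.Linear(3, 2)      # in_features=3, out_features=2
_demo_x = torch.randn(4, 3)        # 4 samples, 3 features each
assert _builtin_fc(_demo_x).shape == (4, 2)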
# Build the perceptron class by subclassing nn.Module and reusing the Linear submodule
class Perception(nn.Module):
    def __init__(self, in_dim, hid_dim, out_dim):
        super(Perception, self).__init__()
        self.layer1 = Linear(in_dim, hid_dim)
        self.layer2 = Linear(hid_dim, out_dim)

    def forward(self, x):
        x = self.layer1(x)
        y = torch.sigmoid(x)
        y = self.layer2(y)
        y = torch.sigmoid(y)
        return y
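# A minimal sketch for comparison (an addition, not part of the original listing):
# the same two-layer network can be expressed with nn.Sequential and the built-in
# nn.Linear / nn.Sigmoid modules, which makes forward() implicit.
def build_perception_sequential(in_dim, hid_dim, out_dim):
    # Hypothetical helper; mirrors Perception's layer1 -> sigmoid -> layer2 -> sigmoid.
    return nn.Sequential(
        nn.Linear(in_dim, hid_dim),
        nn.Sigmoid(),
        nn.Linear(hid_dim, out_dim),
        nn.Sigmoid(),
    )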
if __name__ == "__main__":
    p = Perception(1, 3, 2)
    print(p)
    # named_parameters() returns an iterator over the learnable parameters,
    # yielding (name, value) pairs
    for name, parameter in p.named_parameters():
        print("layer:", name, parameter)
    data = torch.randn(1, 1)  # one sample with a single feature (in_dim = 1)
    output = p(data)
    print("input:", data, data.shape)
    print("output:", output, output.shape)