PyTorch Study Notes (2): nn.Module

1. Five ways to build a model

nn.Sequential()

# Build a network with nn.Sequential
# A plain MLP with one hidden layer: 20 -> 256 (ReLU) -> 10
import torch
from torch import nn

Sequen_net = nn.Sequential(nn.Linear(20, 256), nn.ReLU(), nn.Linear(256, 10))

X = torch.rand(3, 20)
print("X: ", X)
net1 = Sequen_net(X)  # net1 holds the output of the forward pass
print("net1: ", net1)

Result:

X:  tensor([[0.6513, 0.1193, 0.2606, 0.4497, 0.2970, 0.9101, 0.1941, 0.9283, 0.5954,
         0.0683, 0.6453, 0.8361, 0.5083, 0.7473, 0.4150, 0.8179, 0.3845, 0.1216,
         0.7870, 0.9959],
        [0.2798, 0.2982, 0.8267, 0.9409, 0.2763, 0.8739, 0.6138, 0.5610, 0.6146,
         0.3400, 0.9731, 0.7196, 0.7783, 0.0356, 0.0168, 0.9358, 0.8763, 0.0903,
         0.8762, 0.4948],
        [0.4094, 0.7204, 0.8304, 0.4614, 0.3940, 0.4561, 0.8946, 0.5907, 0.8838,
         0.1645, 0.5259, 0.9783, 0.8897, 0.3766, 0.9449, 0.3011, 0.3275, 0.5742,
         0.7173, 0.3336]])
net1:  tensor([[ 0.0874,  0.1452, -0.1404, -0.0427,  0.2162, -0.2174,  0.0608,  0.0526,
          0.0731, -0.0225],
        [ 0.0295,  0.0829, -0.1371, -0.0923,  0.1686, -0.2626,  0.1728,  0.0373,
         -0.0400,  0.0174],
        [-0.0100,  0.0885, -0.1002, -0.0672,  0.0749, -0.1790,  0.1430,  0.1640,
          0.0572,  0.0146]], grad_fn=<AddmmBackward0>)
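
Because nn.Sequential registers its children under numeric keys, individual layers can be pulled out by index; a quick check (my addition, using the names above):

# nn.Sequential numbers its children, so layers can be indexed directly
print(Sequen_net[0])                # the first Linear layer
print(Sequen_net[2].weight.shape)   # torch.Size([10, 256])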

Custom class (subclassing nn.Module)

Every network component should inherit from nn.Module and override its forward() method.

# Build the same network with a custom class (architecture identical to the one above)
from torch.nn import functional as F

X = torch.rand(3, 20)


class MLP(nn.Module):  # every layer of a neural network is a subclass of nn.Module
    def __init__(self):  # declare the sublayers and parameters we need
        super(MLP, self).__init__()  # call the parent constructor
        self.hidden = nn.Linear(20, 256)
        self.out = nn.Linear(256, 10)

    def forward(self, X):  # forward pass with input X
        return self.out(F.relu(self.hidden(X)))


net2 = MLP()
print("net2: ", net2(X))

Result:

net2:  tensor([[ 0.1871, -0.0803,  0.2287, -0.1312,  0.1852,  0.0470, -0.1633,  0.0177,
          0.0817,  0.1049],
        [ 0.2333,  0.0757,  0.2480, -0.0570,  0.2533,  0.0042, -0.2117,  0.0129,
          0.0444,  0.0404],
        [ 0.1890,  0.0458,  0.2060, -0.0098,  0.1948,  0.0225, -0.1746, -0.0491,
          0.0820,  0.0435]], grad_fn=<AddmmBackward0>)
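
Note that assigning nn.Linear(...) to self.hidden and self.out in __init__ is what registers them as submodules; nothing else is needed for their parameters to be trainable. A small check of this (my addition):

# Every module assigned to an attribute in __init__ is auto-registered,
# so its parameters are visible to optimizers via net2.parameters()
num_params = sum(p.numel() for p in net2.parameters())
print(num_params)  # 20*256 + 256 + 256*10 + 10 = 7946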

Sequential block

# Sequential block: reimplementing what nn.Sequential does
class MySequential(nn.Module):
    def __init__(self, *args):
        super(MySequential, self).__init__()
        for idx, block in enumerate(args):
            # _modules is an ordered dict keyed by strings; registering
            # blocks here makes their parameters visible to PyTorch
            self._modules[str(idx)] = block

    def forward(self, X):
        for block in self._modules.values():
            X = block(X)  # feed each block's output into the next block
        return X


net3 = MySequential(nn.Linear(20, 256), nn.ReLU(), nn.Linear(256, 10))
print("net3: ", net3(X))

Result:

net3:  tensor([[-0.0630,  0.1005,  0.1431,  0.2444, -0.0660, -0.0654,  0.0188,  0.2016,
         -0.4174, -0.1706],
        [ 0.0219,  0.0262,  0.1507,  0.1690, -0.0820,  0.0212, -0.1419,  0.3118,
         -0.3934, -0.1119],
        [-0.0213, -0.0285,  0.0478,  0.1897, -0.1706,  0.0083, -0.1190,  0.0981,
         -0.3782, -0.1059]], grad_fn=<AddmmBackward0>)
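
Writing into self._modules directly works, but the public API for the same registration is add_module; a sketch of the equivalent class (MySequential2 is my name, not from the original):

# Alternative registration via nn.Module.add_module(name, module)
class MySequential2(nn.Module):
    def __init__(self, *args):
        super(MySequential2, self).__init__()
        for idx, block in enumerate(args):
            self.add_module(str(idx), block)  # same effect as writing to _modules

    def forward(self, X):
        for block in self._modules.values():
            X = block(X)
        return X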

requires_grad=False

class otherMLP(nn.Module):
    def __init__(self):
        super(otherMLP, self).__init__()
        # rand_weight is excluded from training: with requires_grad=False
        # no gradient is computed for it, so it is never updated
        self.rand_weight = torch.rand((20, 20), requires_grad=False)
        self.linear = nn.Linear(20, 20)

    def forward(self, X):
        X = self.linear(X)
        X = F.relu(torch.mm(X, self.rand_weight) + 1)
        X = self.linear(X)  # reuses self.linear, so both calls share parameters
        while X.abs().sum() > 1:  # arbitrary control flow is allowed in forward
            X /= 2
        return X.sum()


net4 = otherMLP()
print("net4: ", net4(X))

Result:

net4:  tensor(0.0289, grad_fn=<SumBackward0>)
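
One caveat worth noting (my addition): a plain tensor attribute like rand_weight is invisible to state_dict() and does not follow .to(device). If that matters, register it as a buffer instead; a minimal variant:

# Variant using register_buffer (my sketch): buffers are saved in
# state_dict() and moved by .to(device), but still get no gradient
class otherMLP2(nn.Module):
    def __init__(self):
        super(otherMLP2, self).__init__()
        self.register_buffer("rand_weight", torch.rand(20, 20))
        self.linear = nn.Linear(20, 20)

    def forward(self, X):
        X = F.relu(torch.mm(self.linear(X), self.rand_weight) + 1)
        return self.linear(X).sum()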

Mixing and matching composed blocks

# Mixing and matching blocks in arbitrary combinations
class NestMLP(nn.Module):
    def __init__(self):
        super(NestMLP, self).__init__()
        self.net = nn.Sequential(nn.Linear(20, 64), nn.ReLU(),
                                 nn.Linear(64, 16), nn.ReLU())
        self.linear = nn.Linear(16, 16)

    def forward(self, X):
        return self.linear(self.net(X))


net5 = nn.Sequential(NestMLP(), nn.Linear(16, 20), otherMLP())
print("net5: ", net5(X))

Result:

net5:  tensor(-0.1181, grad_fn=<SumBackward0>)
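
Printing the combined model is a quick way to verify how the blocks are nested:

# Printing a model shows the nested block structure at a glance
print(net5)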

2. Parameter management

net.state_dict()

net_paraManag = nn.Sequential(nn.Linear(4, 8), nn.ReLU(), nn.Linear(8, 1))
print(net_paraManag.state_dict())
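
A few more of the standard parameter-access calls (my sketch, assuming the 4 -> 8 -> 1 network above):

# Parameters of a Sequential are named by layer index
for name, param in net_paraManag.named_parameters():
    print(name, param.shape)
# 0.weight torch.Size([8, 4])
# 0.bias   torch.Size([8])
# 2.weight torch.Size([1, 8])
# 2.bias   torch.Size([1])

# A single layer can be indexed and its parameters accessed directly
print(net_paraManag[2].bias)
print(net_paraManag[2].bias.data)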