【nn.Sequential error: the problem turned out to be the position of a parenthesis】

This post resolves a TypeError caused by a misplaced parenthesis. When defining a PyTorch model, nn.Sequential must be called with its opening parenthesis on the same line so that all layers are actually passed to it; moving the parenthesis lets the model build and run successfully.

TypeError: torch.FloatTensor is not a Module subclass

This error took me quite a while to track down, and it turned out to be nothing more than a parenthesis! Recording it here.

class Longtao(nn.Module):
    def __init__(self):
        super(Longtao, self).__init__()
        self.model = nn.Sequential
        (
            #  ↑ this opening parenthesis on its own line is what causes the error
            nn.Conv2d(3, 32, 5, 1, 2),
            nn.MaxPool2d(2),
            nn.Conv2d(32, 32, 5, 1, 2),
            nn.MaxPool2d(2),
            nn.Conv2d(32, 64, 5, 1, 2),
            nn.MaxPool2d(2),

            nn.Flatten(),
            nn.Linear(64 * 4 * 4, 64),
            nn.Linear(64, 10)
        )

## The code above is the buggy version

Because nothing follows nn.Sequential on its line, Python treats self.model = nn.Sequential as a complete statement and binds self.model to the nn.Sequential class itself; the parenthesized layers on the following lines are just a tuple expression that is built and then thrown away. When self.model(x) is called later, it is really nn.Sequential(x), which tries to register the input tensor as a submodule and raises the "is not a Module subclass" TypeError.
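To see the mechanism in isolation, here is a minimal standalone sketch (not from the original post; it only assumes PyTorch is installed) that reproduces the same TypeError:

import torch
from torch import nn

model = nn.Sequential   # the newline ends the statement: model is the class, not an instance
(
    nn.Linear(4, 2),    # this parenthesized tuple is built and then discarded
)

x = torch.ones(1, 4)
model(x)   # really nn.Sequential(x): it tries to register the tensor as a layer
           # -> TypeError: torch.FloatTensor is not a Module subclass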

import torch
from torch import nn


class Longtao(nn.Module):
    def __init__(self):
        super(Longtao, self).__init__()
        self.model = nn.Sequential(
            #  the parenthesis that caused the error: note that it has been moved up onto the nn.Sequential line!!!
            nn.Conv2d(3, 32, 5, 1, 2),
            nn.MaxPool2d(2),
            nn.Conv2d(32, 32, 5, 1, 2),
            nn.MaxPool2d(2),
            nn.Conv2d(32, 64, 5, 1, 2),
            nn.MaxPool2d(2),

            nn.Flatten(),
            nn.Linear(64 * 4 * 4, 64),
            nn.Linear(64, 10)
        )
    def forward(self, x):
        x = self.model(x)
        return x

if __name__ == '__main__':
    longge = Longtao()
    input = torch.ones((64, 3, 32, 32))
    output = longge(input)
    print(output.shape)

With that one parenthesis moved, the code runs correctly!
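As a quick check, here is a short sketch (not from the original post; it assumes the fixed Longtao class above is in scope) that pushes a single 32x32 RGB input through the layers one at a time and prints the shapes, showing where the 64 * 4 * 4 in the first nn.Linear comes from:

longge = Longtao()
x = torch.ones((1, 3, 32, 32))
for layer in longge.model:   # nn.Sequential is iterable and yields its layers in order
    x = layer(x)
    print(layer.__class__.__name__, tuple(x.shape))
# The three MaxPool2d(2) layers halve the spatial size 32 -> 16 -> 8 -> 4, and the last
# Conv2d outputs 64 channels, so Flatten yields 64 * 4 * 4 = 1024 features, which is
# exactly the input size of nn.Linear(64 * 4 * 4, 64).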
