【nn.Sequential TypeError caused by a misplaced parenthesis】

This post fixes a TypeError caused by a misplaced parenthesis in a PyTorch model definition: the opening parenthesis of nn.Sequential must follow it on the same line, otherwise the layers are never passed to the constructor. After moving the parenthesis, the model builds and runs correctly.

TypeError: torch.FloatTensor is not a Module subclass

I spent a long time on this error, and it turned out to be nothing more than a misplaced parenthesis. Writing it down here for the record.

class Longtao(nn.Module):
    def __init__(self):
        super(Longtao, self).__init__()
        self.model = nn.Sequential
        (
            #  ↑ this opening parenthesis on its own line is what triggers the error
            nn.Conv2d(3, 32, 5, 1, 2),
            nn.MaxPool2d(2),
            nn.Conv2d(32, 32, 5, 1, 2),
            nn.MaxPool2d(2),
            nn.Conv2d(32, 64, 5, 1, 2),
            nn.MaxPool2d(2),

            nn.Flatten(),
            nn.Linear(64 * 4 * 4, 64),
            nn.Linear(64, 10)
        )

## The block above is the buggy version; the corrected code appears further below.
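Why this fails: because the opening parenthesis sits on its own line, Python reads self.model = nn.Sequential as a complete statement that assigns the class object itself, and the parenthesized layers on the following lines become a separate tuple expression that is built and immediately thrown away. When forward later evaluates self.model(x), it is really calling nn.Sequential(x) with a tensor, which is rejected with the TypeError above. A minimal, self-contained sketch that reproduces the effect (the layer sizes here are arbitrary, not taken from the original model):

import torch
from torch import nn

model = nn.Sequential   # assigns the class itself -- no Sequential instance is created
(
    nn.Linear(4, 2),    # these layers form a plain tuple that is built and discarded
    nn.ReLU()
)

x = torch.ones(1, 4)
try:
    model(x)            # actually nn.Sequential(x): the tensor is not a Module
except TypeError as e:
    print(e)            # torch.FloatTensor is not a Module subclass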

import torch
from torch import nn


class Longtao(nn.Module):
    def __init__(self):
        super(Longtao, self).__init__()
        self.model = nn.Sequential(
            # the opening parenthesis must stay on the same line as nn.Sequential
            nn.Conv2d(3, 32, 5, 1, 2),
            nn.MaxPool2d(2),
            nn.Conv2d(32, 32, 5, 1, 2),
            nn.MaxPool2d(2),
            nn.Conv2d(32, 64, 5, 1, 2),
            nn.MaxPool2d(2),

            nn.Flatten(),
            nn.Linear(64 * 4 * 4, 64),
            nn.Linear(64, 10)
        )

    def forward(self, x):
        x = self.model(x)
        return x

if __name__ == '__main__':
    longge = Longtao()
    input = torch.ones((64, 3, 32, 32))
    output = longge(input)
    print(output.shape)

With the parenthesis moved back onto the nn.Sequential line, the code builds and runs normally, printing torch.Size([64, 10]).
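As an extra safeguard against this class of mistake, here is a minimal sketch (not from the original post) of a sanity check right after construction: with the misplaced parenthesis, self.model would be the nn.Sequential class rather than an instance, so the assertion fails and print(longge) would show no layers at all.

longge = Longtao()
# With the buggy version, self.model is the nn.Sequential *class*, not a Module instance
assert isinstance(longge.model, nn.Module), "self.model was never instantiated"
print(longge)  # lists the Conv2d/MaxPool2d/Flatten/Linear layers only if they were registered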
