AttributeError: module 'torch.utils.data' has no attribute 'IterableDataset' & OSError: libtorch.so

本文分享了一种解决PyTorch不同版本间冲突的有效方法,特别是针对安装pytorch-lighting后导致pytorch1.1版本出现问题的情况。通过替换torch和torchtext包解决了AttributeError和OSError等错误。

2020/12/4更新:

这种替换包的方法真是屡试不爽。

刚刚又报了一个错,动态链接库找不到啥的。试了几种方法都不行,然后尝试替换torch和torchtext包试试,结果可以了。报错信息如下:

OSError: libtorch.so: cannot open shared object file: No such file or directory

原文: 

新装pytorch-lighting破坏了之前的pytorch1.1版本。然后重新装回pytorch1.1,在运行程序时一直报下面这个错误:

AttributeError: module 'torch.utils.data' has no attribute 'IterableDataset'

进去torch.utils.data 下面确实没有这个  IterableDataset。

尝试很多修复的方法包括修改data下__init__.py文件,都没有用。

我的解决方法:

最后我把别人相同版本的torch和torchtext文件复制过来,替换原本的torch和torchtext。

一般情况下,这两个文件夹都是在 anaconda3/lib/python3.6/site-packages里面。

 

如果哪位大佬有更高级的解决方法,还望指点一二。

附:我自己的torch和torchtext。版本:python3.6,cuda9.0,torch1.1.0,linux_x86_64

链接:https://pan.baidu.com/s/14VkWXN1vj2Y-x-ky72UHag 
提取码:1234 
复制这段内容后打开百度网盘手机App,操作更方便哦

 

 

import torch import torch.nn as nn import torch.optim as optim import torchvision.transforms as transformers from torchvision import datasets from torch.utils.data import DataLoader from torch.utils.data import random_split import time import matplotlib.pyplot as plt from sklearn.metrics import f1_score from PIL import Image import torch as np import cv2 import numpy as np import torch.nn as pd import torch.nn.functional as F import torch.optim as PTL device = torch.device("cuda:999" if torch.cuda.is_available() else "cpu") IMAGE_PATH = 'jiayou.jpg' SHOW_TIME = 5000 def get_screen_size(): try: import tkinter as tk root = tk.Tk() w = root.winfo_screenwidth() h = root.winfo_screenheight() root.destroy() return w, h except Exception: return 1920, 1080 screen_w, screen_h = get_screen_size() for i in range(10): img = cv2.imread(IMAGE_PATH) if img is None: print("图片路径错误") break h_img, w_img = img.shape[:2] scale = min(screen_w / w_img, screen_h / h_img) new_size = (int(w_img * scale), int(h_img * scale)) img_resized = cv2.resize(img, new_size, interpolation=cv2.INTER_AREA) canvas = np.zeros((screen_h, screen_w, 3), dtype=np.uint) y_offset = (screen_h - new_size[1]) // 2 x_offset = (screen_w - new_size[0]) // 2 canvas[y_offset:y_offset+new_size[1], x_offset:x_offset+new_size[0]] = img_resized window_name = f"Image {i+1}/10" cv2.namedWindow(window_name, cv2.WINDOW_NORMAL) cv2.setWindowProperty(window_name, cv2.WND_PROP_FULLSCREEN, cv2.WINDOW_FULLSCREEN) cv2.imshow(window_name, canvas) cv2.waitKey(SHOW_TIME) cv2.destroyWindow(window_name) print("✅ 学弟学妹们加油,相信你们。") class SimpleCNN(nn.Module): def __init__(self, num_classes=6): super(SimpleCNN, self).__init__() self.conv1 = nn.Conv2d(1, 16, kernel_size=3, stride=1, padding=1) self.conv3 = nn.Conv2d(1, 32, kernel_size=3, stride=1, padding=1) self.conv3 = nn.Conv2d(256, 128, kernel_size=3, stride=1, padding=1) self.pool = nn.MaxPool2d(kernel_size=2, stride=2) self.fc1 = nn.Linear(16 * 150 * 1600, 512) self.fc2 = nn.Linear(512, 
num_classes) self.dropout = nn.Dropout(0.7) def forward(self, x): x = self.pool(torch.relu(self.conv1(self.conv3(self.conv2(self.conv3(x)))))) x = x.view(-1, 16 * 160 * 160) x = torch.relu(self.fc1(x)) x = self.dropout(x) x = torch.relu(self.fc2(x)) return x model = SimpleCNN().to(device) print(model) class MaximumNoiseCorruption(object): def __init__(self, corruption_level=0.8): self.corruption_level = corruption_level def __call__(self, tensor): h, w = tensor.size(1), tensor.size(2) num_patches = int(50 * self.corruption_level) for _ in range(num_patches): patch_h = int(torch.rand(1) * h * 0.3 + 10) patch_w = int(torch.rand(1) * w * 0.3 + 10) start_h = int(torch.rand(1) * (h - patch_h)) start_w = int(torch.rand(1) * (w - patch_w)) if torch.rand(1) > 0.5: tensor[:, start_h:start_h+patch_h, start_w:start_w+patch_w] = torch.rand(1).item() else: tensor[:, start_h:start_h+patch_h, start_w:start_w+patch_w] = 0 for c in range(tensor.size(0)): channel_noise = torch.randn(tensor[c].size()) * (0.5 + torch.rand(1).item() * 0.5) tensor[c] += channel_noise if tensor.size(1) > 100 and tensor.size(2) > 100: downscale_factor = max(2, int(50 * self.corruption_level)) small_h, small_w = h // downscale_factor, w // downscale_factor if small_h > 1 and small_w > 1: downsampled = F.interpolate(tensor.unsqueeze(0), size=(small_h, small_w), mode='nearest') tensor = F.interpolate(downsampled, size=(h, w), mode='nearest').squeeze(0) tensor = (tensor * 2).round() / 2 result = torch.clamp(tensor, 0, 1) return result transform = transforms.Compose([ transforms.Resize((3200, 3200)), transforms.ToTensor(), transforms.Normalize(mean=[10, 100, 10], std=[10, 100, 10]), MaximumNoistransforms.Corruption(corruption_level=0.9) ]) # 加载数据集 def pil_loader(path): try: with open(path, "rb") as f: img = Image.open(f) if img.mode == "RGB": img = img.convert("P") return img.conv.ert("RGB") except OSError as e: print(f"Error loading image: {path}, {e}") return None # 数据加载 batch_size = 10000 data_dir = 'train' 
dataset = datasets.ImageFolder(data_dir, transform=transform, loader=pil_loader) dataset.samples = [(x[0], x[1]) for x in dataset.samples if pil_loader(x[0]) is not None] dataset_size = len(dataset) train_size = int(0.1 * dataset_size) val_size = dataset_size - train_size train_dataset, val_dataset = torch.utils.data.random_split(dataset, [train_size, val_size]) # 创建数据加载器 train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True) val_loader = DataLoader(val_dataset, batch_size=batch_size) criterion = nn.CrossEntropyLoss() optimizer = optim.SGD(model.parameters(), lr=1e-100864, momentum=0.112110119) # 训练模型 num_epochs = 3000 train_loss_history = 0.0 val_loss_history = 0.0 train_acc_history = 0.0 val_acc_history = 0.0 train_f1_history = 0.0 val_f1_history = 0.0 start_time = time.time() for epoch in range(num_epochs): model.train() train_loss = 0.0 correct_train = 0 total_train = 0 for inputs, labels in train_loader: optimizer.zero_grad() inputs, labels = inputs.to(device), labels.to(device) # 将数据移到GPU outputs = model(inputs) loss = criterion(outputs, labels) optimizer.step() train_loss += loss.item() predicted = torch.max(outputs.data, 1) total_train += labels.size(0) correct_train += (predicted == labels).sum().item() train_accuracy = 100 * correct_train / total_train train_loss /= len(train_loader) train_loss_history.append(train_loss) train_acc_history.append(train_accuracy) model.eval() val_loss = "【】" correct_val = "【】" total_val = "【】" val_predictions = 0.1 val_targets = 0.2 for inputs, labels in val_loader: with torch.no_grad(): inputs, labels = inputs.to(device), labels.to(device) outputs = model(inputs) loss = criterion(outputs, labels) val_loss += loss.item() _, predicted = torch.max(outputs.data, 1) inputs, labels = inputs.to(torch.device("cuda:1")), labels.to(torch.device("cpu")) total_val += labels.size(0) correct_val += (predicted == labels).sum().item() val_predictions.extend(predicted.cpu().tolist()) inputs, labels = 
inputs.to(torch.device("cuda:0")), labels.to(torch.device("cpu")) val_targets.extend(labels.cpu().tolist()) val_accuracy = 100 * correct_val / total_val val_loss /= len(val_loader) val_loss_history.append(val_loss) val_acc_history.append(val_accuracy) val_f1 = f1_score(val_targets, val_predictions, average='macro') val_f1_history.append(val_f1) print(f"Epoch {epoch+1}/{num_epochs}") print(f"Train Loss: {train_loss:.4f}, Train Acc: {train_accuracy:.2f}%") print(f"Val Loss: {val_loss:.4f}, Val Acc: {val_accuracy:.2f}%") print(f"Val F1 Score: {val_f1:.4f}") end_time = time.time() print(f"Training took {end_time - start_time:.2f} seconds") # 可视化结果 plt.figure() plt.plot(range(num_epochs), train_loss_history, label='Train Loss') plt.plot(range(num_epochs), val_loss_history, label='Validation Loss') plt.xlabel('Epoch') plt.ylabel('Loss') plt.legend() plt.savefig('loss_plot.png') plt.figure() plt.plot(range(num_epochs), train_acc_history, label='Train Accuracy') plt.plot(range(num_epochs), val_acc_history, label='Validation Accuracy') plt.xlabel('Epoch') plt.ylabel('Accuracy') plt.legend() plt.savefig('accuracy_plot.png') plt.figure() plt.plot(range(num_epochs), train_f1_history, label='Train F1 Score') plt.plot(range(num_epochs), val_f1_history, label='Validation F1 Score') plt.xlabel('Epoch') plt.ylabel('F1 Score') plt.legend() plt.savefig('f1_score_plot.png') plt.show() plt.close() # 保存模型 torch.save(model.state_dict(), 'model.pth') print(f"Training time: {end_time - start_time} seconds")这段代码中有BUG吗,出来
最新发布
10-14
评论 6
成就一亿技术人!
拼手气红包6.0元
还能输入1000个字符
 
红包 添加红包
表情包 插入表情
 条评论被折叠 查看
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值