CIFAR-10: upscaling 32×32 images to 227×227 for AlexNet

```python
import os
import pickle

import numpy as np
from PIL import Image

# Directory that contains the data/ folder (assumed to be the script's directory)
CURRENT_DIR = os.path.dirname(os.path.abspath(__file__))


def read_cifar10_data():
    data_dir = CURRENT_DIR + '/data/cifar-10-batches-py/'
    train_name = 'data_batch_'
    test_name = 'test_batch'
    train_X = None
    train_Y = None
    test_X = None
    test_Y = None

    # Training data: the five pickled batches data_batch_1 .. data_batch_5
    for i in range(1, 6):
        file_path = data_dir + train_name + str(i)
        with open(file_path, 'rb') as fo:
            batch = pickle.load(fo, encoding='bytes')
            if train_X is None:
                train_X = batch[b'data']
                train_Y = batch[b'labels']
            else:
                train_X = np.concatenate((train_X, batch[b'data']), axis=0)
                train_Y = np.concatenate((train_Y, batch[b'labels']), axis=0)

    # Test data
    file_path = data_dir + test_name
    with open(file_path, 'rb') as fo:
        batch = pickle.load(fo, encoding='bytes')
        test_X = batch[b'data']
        test_Y = batch[b'labels']

    # Reshape the flat rows into NHWC images: (N, 32, 32, 3)
    train_X = train_X.reshape((50000, 3, 32, 32)).transpose(0, 2, 3, 1)

    # Allocate an array to hold the resized training images
    train_X_resized = np.zeros((50000, 227, 227, 3))

    for i in range(50000):
        img = Image.fromarray(train_X[i])
        # Upscale to 227x227 with bicubic interpolation, then convert back to ndarray
        train_X_resized[i, :, :, :] = np.array(img.resize((227, 227), Image.BICUBIC))

    test_X = test_X.reshape((10000, 3, 32, 32)).transpose(0, 2, 3, 1)

    # Allocate an array to hold the resized test images
    test_X_resized = np.zeros((10000, 227, 227, 3))

    for i in range(10000):
        img = Image.fromarray(test_X[i])
        test_X_resized[i, :, :, :] = np.array(img.resize((227, 227), Image.BICUBIC))

    # One-hot encode the labels, e.g. y_vec[1, 3] = 1 means sample #2 belongs to class #4
    train_y_vec = np.zeros((len(train_Y), 10), dtype=np.float32)
    test_y_vec = np.zeros((len(test_Y), 10), dtype=np.float32)
    for i, label in enumerate(train_Y):
        train_y_vec[i, int(label)] = 1.
    for i, label in enumerate(test_Y):
        test_y_vec[i, int(label)] = 1.

    # Scale pixel values to [0, 1]
    return train_X_resized / 255., train_y_vec, test_X_resized / 255., test_y_vec
```
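
A minimal usage sketch (the variable names are illustrative; only the shapes are checked):

```python
train_X, train_y, test_X, test_y = read_cifar10_data()
print(train_X.shape, train_y.shape)  # (50000, 227, 227, 3) (50000, 10)
print(test_X.shape, test_y.shape)    # (10000, 227, 227, 3) (10000, 10)
```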

Drawback: this needs at least 60 GB of RAM, because all 60,000 resized 227×227×3 images are held in memory at once as float arrays.
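
One way around the memory cost is to keep only the 32×32 uint8 data in memory and resize each mini-batch on the fly. A minimal sketch under that assumption (the function names and batch size are illustrative, not from the original post):

```python
import numpy as np
from PIL import Image

def resize_batch(images_32, size=227):
    """Resize a (N, 32, 32, 3) uint8 batch to (N, size, size, 3) floats in [0, 1]."""
    out = np.zeros((len(images_32), size, size, 3), dtype=np.float32)
    for j, img in enumerate(images_32):
        out[j] = np.array(Image.fromarray(img).resize((size, size), Image.BICUBIC))
    return out / 255.

def iterate_batches(train_X, train_y_vec, batch_size=64):
    """Yield resized mini-batches; only one batch of 227x227 images exists at a time."""
    for start in range(0, len(train_X), batch_size):
        X = resize_batch(train_X[start:start + batch_size])
        y = train_y_vec[start:start + batch_size]
        yield X, y
```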

The following PyTorch code builds AlexNet and trains it on the CIFAR-10 dataset, with the input image size kept at 32×32:

```python
import torch
import torch.nn as nn
import torch.optim as optim
import torchvision
import torchvision.transforms as transforms

# Define the neural network architecture
class AlexNet(nn.Module):
    def __init__(self, num_classes=10):
        super(AlexNet, self).__init__()
        self.features = nn.Sequential(
            nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=2, stride=2),
            nn.Conv2d(64, 192, kernel_size=3, stride=1, padding=1),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=2, stride=2),
            nn.Conv2d(192, 384, kernel_size=3, stride=1, padding=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(384, 256, kernel_size=3, stride=1, padding=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=2, stride=2),
        )
        self.avgpool = nn.AdaptiveAvgPool2d((6, 6))
        self.classifier = nn.Sequential(
            nn.Dropout(),
            nn.Linear(256 * 6 * 6, 4096),
            nn.ReLU(inplace=True),
            nn.Dropout(),
            nn.Linear(4096, 4096),
            nn.ReLU(inplace=True),
            nn.Linear(4096, num_classes),
        )

    def forward(self, x):
        x = self.features(x)
        x = self.avgpool(x)
        x = torch.flatten(x, 1)
        x = self.classifier(x)
        return x

# Define the data preprocessing
transform = transforms.Compose([
    transforms.Resize(32),
    transforms.RandomHorizontalFlip(),
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])
])

# Load the CIFAR10 dataset
trainset = torchvision.datasets.CIFAR10(root='./data', train=True,
                                        download=True, transform=transform)
trainloader = torch.utils.data.DataLoader(trainset, batch_size=128,
                                          shuffle=True, num_workers=2)
testset = torchvision.datasets.CIFAR10(root='./data', train=False,
                                       download=True, transform=transform)
testloader = torch.utils.data.DataLoader(testset, batch_size=100,
                                         shuffle=False, num_workers=2)

# Create the neural network model and optimizer
net = AlexNet()
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(net.parameters(), lr=0.01, momentum=0.9)

# Train the neural network model
for epoch in range(10):
    running_loss = 0.0
    for i, data in enumerate(trainloader, 0):
        inputs, labels = data
        optimizer.zero_grad()
        outputs = net(inputs)
        loss = criterion(outputs, labels)
        loss.backward()
        optimizer.step()
        running_loss += loss.item()
        if i % 100 == 99:
            print('[%d, %5d] loss: %.3f' % (epoch + 1, i + 1, running_loss / 100))
            running_loss = 0.0

# Test the neural network model
correct = 0
total = 0
with torch.no_grad():
    for data in testloader:
        images, labels = data
        outputs = net(images)
        _, predicted = torch.max(outputs.data, 1)
        total += labels.size(0)
        correct += (predicted == labels).sum().item()

print('Accuracy of the network on the 10000 test images: %d %%' % (100 * correct / total))
```
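
Note the design choice that makes this work without the 227×227 upscaling: `nn.AdaptiveAvgPool2d((6, 6))` pools whatever spatial size comes out of `features` (4×4 for a 32×32 input) to a fixed 256×6×6 tensor, so the classifier's `256 * 6 * 6` input dimension holds regardless of input resolution, and `transforms.Resize(32)` plus a horizontal flip is enough preprocessing.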