Naive Container 1.0 Released

Component Assembly Example
This post walks through a simple component-assembly example showing how different classes obtain a shared component through the container and perform specific actions, such as falling in love, kissing, and marrying. The example defines three classes, Susan, Lily, and Lucy, each of which interacts with a different Boy object.
The binaries and source code can be downloaded here:
[url]http://naive.container.googlepages.com/home[/url]

Why it exists:
The simplest, most primitive form of component assembly.

Usage:

public class Susan : ContainerBound
{
    public void FallInLove()
    {
        Console.WriteLine("Susan has fallen in love with " + Get<Boy>().Name);
    }
}

public class Lily : ContainerBound
{
    public void Kiss()
    {
        Console.WriteLine("Lily is kissing {0}", Get<Boy>().Name);
    }
}

public class Lucy : ContainerBound
{
    public void Marry()
    {
        Console.WriteLine("Lucy is marrying " + Get<Boy>().Name);
    }
}
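
The library's internals are not shown in this post. To make Get<Boy>() and the per-context containers concrete, here is a minimal sketch of what ContainerBound, Containers, and the Boy/GenericBoy components could look like. The resolution order (a component's own context first, then the general object context) and the Boy/GenericBoy hierarchy are assumptions for illustration only, not the actual Naive Container implementation:

using System;
using System.Collections.Generic;

// Assumed component types; the post only shows that a GenericBoy carries a Name.
public class Boy
{
    private readonly string name;
    public Boy(string name) { this.name = name; }
    public string Name { get { return name; } }
}

public class GenericBoy : Boy
{
    public GenericBoy(string name) : base(name) { }
}

// Hypothetical registry: one bucket of components per context type.
public static class Containers
{
    private static readonly Dictionary<Type, List<object>> contexts =
        new Dictionary<Type, List<object>>();

    public static Container GetContainerInContext<TContext>()
    {
        if (!contexts.ContainsKey(typeof(TContext)))
            contexts[typeof(TContext)] = new List<object>();
        return new Container(contexts[typeof(TContext)]);
    }

    // Assumed lookup order: the caller's own context first, then the general object context.
    internal static TComponent Resolve<TComponent>(Type contextType) where TComponent : class
    {
        foreach (Type context in new[] { contextType, typeof(object) })
        {
            List<object> bucket;
            if (contexts.TryGetValue(context, out bucket))
            {
                foreach (object component in bucket)
                    if (component is TComponent)
                        return (TComponent)component;
            }
        }
        return null;
    }

    public static void Close()
    {
        // Nothing to finalize in this sketch; the real library may seal configuration here.
    }
}

public class Container
{
    private readonly List<object> bucket;
    internal Container(List<object> bucket) { this.bucket = bucket; }
    public void Put(object component) { bucket.Add(component); }
}

// Base class that lets a bound component pull its dependencies by type.
public abstract class ContainerBound
{
    protected TComponent Get<TComponent>() where TComponent : class
    {
        return Containers.Resolve<TComponent>(GetType());
    }
}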


Configuration:

Containers.GetContainerInContext<object>().Put(new GenericBoy("Van"));
Containers.GetContainerInContext<Lucy>().Put(new GenericBoy("Tom"));
Containers.GetContainerInContext<Lily>().Put(new GenericBoy("Joy"));
Containers.Close();
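
Putting configuration and usage together, a small driver might look like the snippet below. The expected output assumes that a Boy registered in a class's own context takes precedence over the one registered in the general object context:

public static class Demo
{
    public static void Main()
    {
        // Configuration, as shown above.
        Containers.GetContainerInContext<object>().Put(new GenericBoy("Van"));
        Containers.GetContainerInContext<Lucy>().Put(new GenericBoy("Tom"));
        Containers.GetContainerInContext<Lily>().Put(new GenericBoy("Joy"));
        Containers.Close();

        // Usage: each component resolves the Boy visible from its own context.
        new Susan().FallInLove();  // Susan has fallen in love with Van
        new Lily().Kiss();         // Lily is kissing Joy
        new Lucy().Marry();        // Lucy is marrying Tom
    }
}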


For more details, see the project home page.