import torch
import torch.nn as nn
import torch.optim as optim
import torchvision
import torchvision.transforms as transforms
from torch.utils.data import DataLoader
# Define the neural network architecture
class SimpleNN(nn.Module):
    def __init__(self):
        super(SimpleNN, self).__init__()
        self.fc1 = nn.Linear(28 * 28, 128)  # input layer -> hidden layer
        self.fc2 = nn.Linear(128, 10)       # hidden layer -> output layer

    def forward(self, x):
        x = x.view(-1, 28 * 28)        # flatten the input images
        x = torch.relu(self.fc1(x))    # hidden-layer activation
        x = self.fc2(x)                # output layer (raw logits)
        return x
# Instantiate the model
model = SimpleNN()

# Loss function and optimizer
criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(model.parameters(), lr=0.001)
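
# Quick sanity check (an illustrative sketch, not part of the original tutorial):
# the model maps an (N, 1, 28, 28) batch to (N, 10) logits, and
# nn.CrossEntropyLoss applies log-softmax internally, so the network should
# output raw logits rather than probabilities.
with torch.no_grad():
    dummy_images = torch.randn(4, 1, 28, 28)   # fake batch of 4 images
    dummy_logits = model(dummy_images)         # shape: (4, 10)
    dummy_loss = criterion(dummy_logits, torch.randint(0, 10, (4,)))
    print(f'Sanity check: logits {tuple(dummy_logits.shape)}, loss {dummy_loss.item():.4f}')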
# Load the data (MNIST as an example)
transform = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize((0.5,), (0.5,))
])
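# Note (hedged): ToTensor scales pixel values to [0, 1]; Normalize((0.5,), (0.5,))
# then maps them to roughly [-1, 1] via (x - 0.5) / 0.5. The dataset-specific
# MNIST statistics (mean 0.1307, std 0.3081) are another common choice.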
train_dataset = torchvision.datasets.MNIST(root='./data', train=True, download=True, transform=transform)
train_loader = DataLoader(train_dataset, batch_size=64, shuffle=True)
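
# Optional shape check (illustrative, added here): each batch is a
# (64, 1, 28, 28) image tensor paired with a (64,) tensor of class labels.
sample_images, sample_labels = next(iter(train_loader))
print(f'Batch shapes: {tuple(sample_images.shape)}, {tuple(sample_labels.shape)}')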
# Train the model
epochs = 5
for epoch in range(epochs):
    model.train()
    for inputs, labels in train_loader:
        optimizer.zero_grad()              # clear accumulated gradients
        # Forward pass
        outputs = model(inputs)
        # Compute the loss
        loss = criterion(outputs, labels)
        # Backward pass: compute the gradient of the loss with respect to each
        # parameter and store it in that tensor's .grad attribute
        loss.backward()
        # Update the parameters using the computed gradients
        optimizer.step()
    # loss here refers to the last batch of the epoch
    print(f'Epoch [{epoch+1}/{epochs}], Loss: {loss.item():.4f}')
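
# A minimal evaluation sketch (an assumed extension, not in the original code):
# measure accuracy on the held-out MNIST test split with gradients disabled.
test_dataset = torchvision.datasets.MNIST(root='./data', train=False, download=True, transform=transform)
test_loader = DataLoader(test_dataset, batch_size=64, shuffle=False)

model.eval()                               # switch layers to inference behavior
correct, total = 0, 0
with torch.no_grad():                      # no gradients needed for evaluation
    for inputs, labels in test_loader:
        outputs = model(inputs)
        predicted = outputs.argmax(dim=1)  # class with the highest logit
        correct += (predicted == labels).sum().item()
        total += labels.size(0)
print(f'Test accuracy: {100 * correct / total:.2f}%')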