import torch
import torch.nn as nn
import torch.optim as optim
import torch.distributed as dist
import torch.multiprocessing as mp
# Define the model
class Model(nn.Module):
    """Minimal regression model: a single linear layer mapping 10 input
    features to 1 output value."""

    def __init__(self):
        # Zero-argument super() is the idiomatic Python 3 form.
        super().__init__()
        self.fc = nn.Linear(10, 1)

    def forward(self, x):
        """Apply the linear layer.

        Args:
            x: input tensor of shape (..., 10).

        Returns:
            Tensor of shape (..., 1).
        """
        return self.fc(x)
# Define the training function (one optimization step on one batch)
def train(model, device, data, lr=0.01):
    """Run a single SGD step of MSE training on one batch.

    NOTE(review): the optimizer is re-created on every call, so no optimizer
    state persists across calls — fine for plain SGD, but worth confirming
    this is intended if momentum or other stateful optimizers are adopted.
    The model is assumed to already live on ``device``; only the batch is
    moved there.

    Args:
        model: the ``nn.Module`` to train.
        device: device the batch is moved to (must match the model's device).
        data: ``(inputs, targets)`` tuple of tensors.
        lr: SGD learning rate (default 0.01, preserving the original
            hard-coded value).

    Returns:
        The scalar MSE loss for this batch, as a Python float.
    """
    optimizer = optim.SGD(model.parameters(), lr=lr)
    criterion = nn.MSELoss()
    x, y = data
    x = x.to(device)
    y = y.to(device)
    optimizer.zero_grad()
    output = model(x)
    loss = criterion(output, y)
    loss.backward()
    optimizer.step()
    # Return the loss so callers can monitor training progress
    # (backward-compatible: previous callers ignored the None return).
    return loss.item()
# Define the parallel (multi-GPU) training function
def parallel_train(model, data):
ngpus = torch.cu