def train(net, train_iter, val_iter, num_epochs, lr, wd, devices, lr_period, lr_decay):
    global val_acc, metric
    trainer = torch.optim.SGD(net.parameters(), lr=lr, momentum=0.9, weight_decay=wd)
    scheduler = torch.optim.lr_scheduler.StepLR(trainer, lr_period, lr_decay)
    num_batches, timer = len(train_iter), d2l.Timer()
    train_loss, train_accs, val_accs = [], [], []
    for epoch in range(num_epochs):
        net.train()
        metric = d2l.Accumulator(3)
        for i, (features, labels) in enumerate(train_iter):
            trainer.zero_grad()
            features, labels = features.to(devices[0]), labels.to(devices[0])
            l, acc = d2l.train_batch_ch13(net, features, labels, loss, trainer, devices)
            metric.add(l, acc, labels.shape[0])
        train_loss.append(metric[0] / metric[2])
        train_accs.append(metric[1] / metric[2])
        if val_iter is not None:
            val_acc = d2l.evaluate_accuracy_gpu(net, val_iter)
            val_accs.append(val_acc)
        d2l.plot(range(1, epoch + 2), [train_loss, train_accs, val_accs],
                 xlabel='epoch', legend=['train loss', 'train acc', 'val acc'],
                 figsize=(8, 6))
        scheduler.step()

Running this raises:

RuntimeError: Input type (torch.cuda.FloatTensor) and weight type (torch.FloatTensor) should be the same

How should the code be modified to fix this?
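The error means the input batch is a CUDA tensor while the model's weights are still ordinary CPU tensors: the data is moved to devices[0] (both manually and again inside d2l.train_batch_ch13), but net itself is never moved to the GPU. Placing the model on the device(s) before building the optimizer fixes it. Below is a minimal sketch of the corrected function, assuming the helpers from the book's d2l.torch package and a cross-entropy loss, which the original snippet uses but never defines; the redundant zero_grad() and manual .to(devices[0]) calls are dropped because d2l.train_batch_ch13 already does both.

import torch
from torch import nn
from d2l import torch as d2l

def train(net, train_iter, val_iter, num_epochs, lr, wd, devices,
          lr_period, lr_decay):
    # Assumed loss function; the original snippet never defines `loss`.
    loss = nn.CrossEntropyLoss(reduction='none')
    # The fix: move the model onto the GPU(s) so its weights live on the
    # same device as the inputs. This is what d2l's own train_ch13 does.
    net = nn.DataParallel(net, device_ids=devices).to(devices[0])
    trainer = torch.optim.SGD(net.parameters(), lr=lr, momentum=0.9,
                              weight_decay=wd)
    scheduler = torch.optim.lr_scheduler.StepLR(trainer, lr_period, lr_decay)
    train_loss, train_accs, val_accs = [], [], []
    for epoch in range(num_epochs):
        net.train()
        metric = d2l.Accumulator(3)  # sum of loss, sum of correct, #examples
        for features, labels in train_iter:
            # train_batch_ch13 moves the batch to devices[0], zeroes the
            # gradients, and performs one optimizer step.
            l, acc = d2l.train_batch_ch13(net, features, labels, loss,
                                          trainer, devices)
            metric.add(l, acc, labels.shape[0])
        train_loss.append(metric[0] / metric[2])
        train_accs.append(metric[1] / metric[2])
        curves = [train_loss, train_accs]
        legend = ['train loss', 'train acc']
        if val_iter is not None:
            val_accs.append(d2l.evaluate_accuracy_gpu(net, val_iter))
            curves.append(val_accs)
            legend.append('val acc')
        d2l.plot(range(1, epoch + 2), curves, xlabel='epoch',
                 legend=legend, figsize=(8, 6))
        scheduler.step()

If multi-GPU data parallelism is not needed, a plain net.to(devices[0]) before creating the optimizer is enough; the key point is that the parameters and the inputs must be on the same device.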