import torch
import torch.nn as nn
import torch.utils.data as Data
import torch.optim as optim
import torch.nn.init as init


def data_init():
    # ground-truth parameters of the synthetic linear model
    true_w = [2, -3.4]
    true_b = 3
    num_features = 2
    num_data = 1000
    num_output = 1
    # draw features from a standard normal distribution
    features = torch.normal(0, 1, (num_data, num_features))
    # labels follow the true linear model plus small Gaussian noise
    labels = true_w[0] * features[:, 0] + true_w[1] * features[:, 1] + true_b
    e = torch.normal(0, 0.01, size=labels.size())
    labels += e
    return features, labels, num_features, num_output


def linear_test():
    features, labels, num_features, num_output = data_init()
    # wrap the tensors in a dataset
    data_set = Data.TensorDataset(features, labels)
    batch_size = 10
    # build the mini-batch iterator
    data_iter = Data.DataLoader(data_set, batch_size, shuffle=True)
    # define hyperparameters
    num_epochs = 3
    lr = 0.03
    # define the model
    net = nn.Sequential()
    net.add_module('linear1', nn.Linear(num_features, num_output))
    # initialize the model parameters
    init.normal_(net[0].weight, 0, 0.01)
    init.constant_(net[0].bias, val=0)
    # define the loss function
    loss_func = nn.MSELoss()
    # define the optimizer over all model parameters
    optimizer = optim.SGD(net.parameters(), lr=lr)
    # training loop
    for epoch in range(1, num_epochs + 1):
        loss = None  # holds the last mini-batch loss of this epoch
        for x, y in data_iter:
            # forward pass: compute predictions
            output = net(x)
            # compute loss; reshape y to match the (batch, 1) output
            loss = loss_func(output, y.view(-1, 1))
            # clear stale gradients before backpropagation
            optimizer.zero_grad()
            # backpropagate to compute fresh gradients
            loss.backward()
            # update the parameters
            optimizer.step()
        print('epoch', epoch, 'loss:', loss.item())
    dense = net[0]
    print(dense.weight)
    print(dense.bias)


if __name__ == '__main__':
    print('start')
    linear_test()
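

# --- Optional sanity check (a sketch; not part of the original script) ---
# With noise std 0.01, a well-fit model should reach an MSE near the noise
# variance (0.01 ** 2 = 1e-4), and the learned parameters should approximate
# true_w = [2, -3.4] and true_b = 3. The helper name and the 0.1 tolerance
# below are illustrative assumptions; to call it after training, linear_test()
# would have to be changed to return `net`.
def check_parameters(net, true_w=(2.0, -3.4), true_b=3.0, tol=0.1):
    # flatten the learned weight row and read the scalar bias
    w = net[0].weight.detach().view(-1)
    b = net[0].bias.item()
    # compare each learned value against the ground truth
    ok = all(abs(w[i].item() - tw) < tol for i, tw in enumerate(true_w))
    ok = ok and abs(b - true_b) < tol
    print('parameters within tolerance:', ok)
    return ok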