Deep Learning: Linear Regression (Part 1)

1. Implementing Linear Regression from Scratch

Generating the data

import random
import torch
import matplotlib.pyplot as plt


#***************** 1. Data generation function *****************
def synthetic_data(w=torch.tensor([2, -3.4]), b=4.2, num_examples=1000):
    """Generate y = Xw + b plus Gaussian noise."""
    x = torch.normal(0, 1, (num_examples, len(w)))   # features drawn from N(0, 1)
    y = torch.matmul(x, w) + b
    y += torch.normal(0, 0.01, y.shape)              # additive noise with std 0.01
    return x, y.reshape(-1, 1)
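As a quick sanity check, here is a minimal sketch (using the matplotlib import above) that scatters the second feature against the labels; the negative linear trend from w[1] = -3.4 should be clearly visible:

features, labels = synthetic_data()
plt.scatter(features[:, 1].numpy(), labels.numpy(), s=1)   # second feature vs. label
plt.xlabel('x[1]')
plt.ylabel('y')
plt.show()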

Reading the dataset


#***************** 2. Reading the dataset *****************
def data_iter(batch_size, features, label):
    """Yield minibatches of (features, labels) in random order."""
    num_examples = len(features)
    indices = list(range(num_examples))
    random.shuffle(indices)                          # shuffle so batches are random
    for i in range(0, num_examples, batch_size):
        batch_indices = torch.tensor(indices[i:min(i + batch_size, num_examples)])
        yield features[batch_indices], label[batch_indices]

def data_iter_test():
    batch_size = 10
    features, labels = synthetic_data()
    for x, y in data_iter(batch_size, features, labels):
        print(x, '\n', y)
        break                                        # only show the first batch
data_iter_test()

Initializing model parameters

w = torch.normal(0, 0.01, size=(2, 1), requires_grad=True)   # small random weights
b = torch.zeros(1, requires_grad=True)                       # zero bias

# For linear regression, initializing the weights to zero also works:
# w = torch.zeros(size=(2, 1), requires_grad=True)

Defining the linear regression model

def linear_regression(x, w, b):
    """The linear regression model."""
    return torch.matmul(x, w) + b

The loss function

def squared_loss(y_predict, y):
    """Squared loss."""
    return (y_predict - y.reshape(y_predict.shape)) ** 2 / 2
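For reference, this computes the per-example squared loss; the factor of 1/2 makes the derivative come out clean:

$$\ell^{(i)}(\mathbf{w}, b) = \frac{1}{2}\left(\hat{y}^{(i)} - y^{(i)}\right)^2$$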

The optimization algorithm

def sgd(params, lr, batch_size):
    """Minibatch stochastic gradient descent."""
    with torch.no_grad():
        for param in params:
            param -= lr * param.grad / batch_size    # average gradient over the batch
            param.grad.zero_()                       # reset gradients for the next step
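Each call applies one step of the minibatch SGD update rule, with learning rate $\eta$ and minibatch $\mathcal{B}$:

$$(\mathbf{w}, b) \leftarrow (\mathbf{w}, b) - \frac{\eta}{|\mathcal{B}|} \sum_{i \in \mathcal{B}} \partial_{(\mathbf{w}, b)} \ell^{(i)}(\mathbf{w}, b)$$

Note that the training loop below sums the per-example losses before calling backward(), and sgd divides by batch_size, so together they compute the averaged gradient above.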

Training the model

lr = 0.02
num_epochs = 4
batch_size = 10

net = linear_regression
loss = squared_loss

# Ground-truth parameters used by synthetic_data, for measuring estimation error
true_w = torch.tensor([2, -3.4])
true_b = 4.2

def train_model():
    train_x, train_y = synthetic_data(true_w, true_b)
    for epoch in range(num_epochs):
        for x, y in data_iter(batch_size, features=train_x, label=train_y):
            l = loss(net(x, w, b), y)         # minibatch loss
            l.sum().backward()                # sum here; sgd divides by batch_size
            sgd([w, b], lr, batch_size)
        with torch.no_grad():
            train_l = loss(net(train_x, w, b), train_y)
            print(f'epoch {epoch + 1}, loss {float(train_l.mean()):f}')
    print(f'estimation error of w: {true_w - w.reshape(true_w.shape)}')
    print(f'estimation error of b: {true_b - b}')
train_model()

2. Implementing Linear Regression with PyTorch's Built-in APIs

Getting the data

import torch
from torch.utils import data

# Generate the data
true_w = torch.tensor([2, -3.4])
true_b = 4.2
features, labels = synthetic_data(true_w, true_b, 1000)

# Load the data
def load_array(data_arrays, batch_size, is_train=True):
    """Construct a PyTorch data iterator."""
    dataset = data.TensorDataset(*data_arrays)
    return data.DataLoader(dataset=dataset, batch_size=batch_size, shuffle=is_train)

batch_size = 10
data_iter = load_array((features, labels), batch_size)

print(next(iter(data_iter)))    # peek at the first batch

Building the linear model

The Sequential class chains multiple layers together: given input data, a Sequential instance passes it to the first layer, feeds the first layer's output into the second layer, and so on. Below we build a network with a single fully connected layer and initialize its parameters.

from torch import nn
net = nn.Sequential(nn.Linear(2, 1))

net[0].weight.data.normal_(0, 0.01)        # initialize the weights w
net[0].bias.data.fill_(0)                  # initialize the bias b
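The same initialization can also be written with the torch.nn.init helpers; a minimal equivalent sketch:

nn.init.normal_(net[0].weight, mean=0.0, std=0.01)   # weights ~ N(0, 0.01^2)
nn.init.zeros_(net[0].bias)                          # bias = 0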

Defining the loss function

There are many loss functions; commonly used ones include the squared loss, mean squared error, and the L1 and L2 norms. Some of the criteria PyTorch provides:

    L1Loss: mean absolute error (MAE)
    NLLLoss: the negative log likelihood loss
    PoissonNLLLoss: negative log likelihood loss with Poisson distribution of target
    GaussianNLLLoss: Gaussian negative log likelihood loss
    KLDivLoss: the Kullback-Leibler divergence loss
    MSELoss: the mean squared error (squared L2 norm)
    BCELoss: measures the binary cross entropy between the target and the input probabilities

loss = nn.MSELoss()

# Optimization algorithm
trainer = torch.optim.SGD(net.parameters(), lr=0.03)
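By default nn.MSELoss averages the squared error over all elements (reduction='mean'). A tiny check on made-up tensors:

demo_pred = torch.tensor([1.0, 2.0, 3.0])
demo_true = torch.tensor([1.0, 2.0, 5.0])
print(loss(demo_pred, demo_true))   # tensor(1.3333) = (0 + 0 + 4) / 3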

Training the model

num_epochs = 3
print(f'w: {net[0].weight}, b: {net[0].bias}')   # parameters before training
for epoch in range(num_epochs):
    for x, y in data_iter:
        l = loss(net(x), y)
        trainer.zero_grad()     # clear gradients
        l.backward()
        trainer.step()          # update parameters
    l = loss(net(features), labels)
    print(f'epoch {epoch + 1}, loss {l:f}')
print(f'w: {net[0].weight}, b: {net[0].bias}')   # parameters after training
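As in the from-scratch version, we can compare the learned parameters against true_w and true_b; a short sketch:

w = net[0].weight.data
b = net[0].bias.data
print('estimation error of w:', true_w - w.reshape(true_w.shape))
print('estimation error of b:', true_b - b)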