Automatic Differentiation and a Simple Logistic Regression Implementation
1. Automatic differentiation: torch.autograd.backward()
"""
torch.autograd.backward(
tensor, 用于求导的张量
grad_tensors, 多梯度权重
retain_graph,保存计算图
create_graph 创建导数计算图,用于高阶求导
)
"""
import torch

# y = (x + w) * (w + 1)
# a = x + w
# b = w + 1
# y = a * b
w = torch.tensor([1.], requires_grad=True)
x = torch.tensor([2.], requires_grad=True)
a = torch.add(w, x)
b = torch.add(w, 1)
y = torch.mul(a, b)
y.backward()
print(w.grad)
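retain_graph is listed above but not exercised here; calling backward() a second time on the same graph fails unless the first call keeps the graph alive. A minimal sketch, reusing the same y = (x + w) * (w + 1) example:
w = torch.tensor([1.], requires_grad=True)
x = torch.tensor([2.], requires_grad=True)
y = torch.mul(torch.add(w, x), torch.add(w, 1))
y.backward(retain_graph=True)  # keep the graph so backward can run again
y.backward()                   # second call works because the graph was retained
print(w.grad)                  # gradients accumulate: tensor([10.])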
w = torch.tensor([1.], requires_grad=True)
x = torch.tensor([2.], requires_grad=True)
a = torch.add(w, x)
b = torch.add(w, 1)
y0 = torch.mul(a, b)
y1 = torch.add(a, b)
loss = torch.cat([y0, y1], dim=0)
grad_t = torch.tensor([1., 1.])  # weights for the multiple gradients
loss.backward(gradient=grad_t)
print(w.grad)
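Here dy0/dw = 5 and dy1/dw = 2, so with the weights [1., 1.] the print above shows tensor([7.]). Changing the weights (illustrative values) makes the weighting visible:
w = torch.tensor([1.], requires_grad=True)
x = torch.tensor([2.], requires_grad=True)
a = torch.add(w, x)
b = torch.add(w, 1)
loss = torch.cat([torch.mul(a, b), torch.add(a, b)], dim=0)
loss.backward(gradient=torch.tensor([1., 2.]))  # hypothetical weights
print(w.grad)  # 1*5 + 2*2 -> tensor([9.])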
2. Computing gradients: torch.autograd.grad()
"""
torch.autograd.grad( 用于求取梯度
outputs, 用于求导的张量
inputs, 需要梯度的张量
grad_outputs, 多梯度权重
retain_graph, 保存计算图
create_graph 创建导数计算图
)
"""
x = torch.tensor([3.], requires_grad=True)
y = torch.pow(x, 2)
grad_t = torch.autograd.grad(y, x, create_graph=True)  # create_graph builds a graph for the derivative itself, so it can be differentiated again
print(grad_t)
grad_t1 = torch.autograd.grad(grad_t[0], x)
print(grad_t1)
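torch.autograd.grad can also take several inputs at once and returns one gradient per input; a small sketch, reusing the y = (x + w) * (w + 1) example from section 1:
w = torch.tensor([1.], requires_grad=True)
x = torch.tensor([2.], requires_grad=True)
y = torch.mul(torch.add(w, x), torch.add(w, 1))
dw, dx = torch.autograd.grad(y, (w, x))  # gradients w.r.t. both inputs
print(dw, dx)  # tensor([5.]) tensor([2.])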
① Gradients are not cleared automatically
Use grad.zero_() to reset them (a sketch of this fix follows the output below)
w = torch.tensor([1.], requires_grad=True)
x = torch.tensor([2.], requires_grad=True)
for i in range(2):
    a = torch.add(w, x)
    b = torch.add(w, 1)
    y = torch.mul(a, b)
    y.backward()
    print(w.grad)
Output:
# tensor([5.])
# tensor([10.])
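A minimal sketch of the fix named above: zeroing w.grad in place at the end of each iteration stops the accumulation.
w = torch.tensor([1.], requires_grad=True)
x = torch.tensor([2.], requires_grad=True)
for i in range(2):
    a = torch.add(w, x)
    b = torch.add(w, 1)
    y = torch.mul(a, b)
    y.backward()
    print(w.grad)    # tensor([5.]) on both iterations
    w.grad.zero_()   # clear the accumulated gradient in place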
② Nodes that depend on leaf nodes have requires_grad=True by default
w = torch.tensor([1.], requires_grad=True)
x = torch.tensor([2.], requires_grad=True)
a = torch.add(w, x)
b = torch.add(w, 1)
y = torch.mul(a, b)
print(w.requires_grad, x.requires_grad, a.requires_grad, b.requires_grad, y.requires_grad)
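The print shows True for every tensor. Only w and x are leaf nodes; the others inherit requires_grad because they depend on leaves, which can be checked with is_leaf (a quick sketch):
print(w.is_leaf, x.is_leaf, a.is_leaf, b.is_leaf, y.is_leaf)  # True True False False False
print(y.grad_fn)  # <MulBackward0 ...>, the operation that produced y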
③ In-place operations must not be performed on leaf tensors
w = torch.tensor([1.], requires_grad=True)
x = torch.tensor([2.], requires_grad=True)
a = torch.add(w, x)
b = torch.add(w, 1)
y = torch.mul(a, b)
w.add_(1)
y.backward()
Because of the in-place w.add_(1) on a leaf tensor that requires grad, this snippet raises a RuntimeError and y.backward() cannot complete.
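If a leaf tensor genuinely needs an in-place update (e.g. a manual parameter step), wrapping the operation in torch.no_grad() avoids the error; a minimal sketch:
w = torch.tensor([1.], requires_grad=True)
with torch.no_grad():
    w.add_(1)  # allowed here: autograd does not track operations inside no_grad()
print(w)       # tensor([2.], requires_grad=True)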
3. A simple logistic regression implementation
import torch
import torch.nn as nn
import matplotlib.pyplot as plt
import numpy as np
torch.manual_seed(10)
# 1. Generate data
sample_nums = 100  # number of samples per class
mean_value = 1.7
bias = 1
n_data = torch.ones(sample_nums, 2)
x0 = torch.normal(mean_value * n_data, 1) + bias   # class-0 features, shape (100, 2)
y0 = torch.zeros(sample_nums)                      # class-0 labels, shape (100,)
x1 = torch.normal(-mean_value * n_data, 1) + bias  # class-1 features, shape (100, 2)
y1 = torch.ones(sample_nums)                       # class-1 labels, shape (100,)
train_x = torch.cat((x0, x1), 0)
train_y = torch.cat((y0, y1), 0)
# 2. Define the model
class LR(nn.Module):
    def __init__(self):
        super(LR, self).__init__()
        self.features = nn.Linear(2, 1)
        self.sigmoid = nn.Sigmoid()

    def forward(self, x):
        x = self.features(x)
        x = self.sigmoid(x)
        return x
lr_net = LR()  # instantiate the logistic regression model
# 3. Choose a loss function
loss_fn = nn.BCELoss()
# 4. Choose an optimizer
lr = 0.01
optimizer = torch.optim.SGD(lr_net.parameters(), lr=lr, momentum=0.9)
# 5. Train the model
for iteration in range(1000):
    # forward pass
    y_pred = lr_net(train_x)
    # compute the loss
    loss = loss_fn(y_pred.squeeze(), train_y)
    # backward pass
    loss.backward()
    optimizer.step()
    # clear gradients
    optimizer.zero_grad()
    if iteration % 20 == 0:
        mask = y_pred.ge(0.5).float().squeeze()  # mask is 1 where y_pred >= 0.5
        correct = (mask == train_y).sum()
        acc = correct.item() / train_y.size(0)
        plt.scatter(x0.data.numpy()[:, 0], x0.data.numpy()[:, 1], c='r', label='class 0')
        plt.scatter(x1.data.numpy()[:, 0], x1.data.numpy()[:, 1], c='b', label='class 1')
        w0, w1 = lr_net.features.weight[0]
        w0, w1 = float(w0.item()), float(w1.item())
        plot_b = float(lr_net.features.bias[0].item())
        plot_x = np.arange(-6, 6, 0.1)
        plot_y = (-w0 * plot_x - plot_b) / w1  # decision boundary: w0*x + w1*y + b = 0
        plt.xlim(-5, 7)
        plt.ylim(-7, 7)
        plt.plot(plot_x, plot_y)
        plt.text(-5, 5, 'Loss=%.4f' % loss.data.numpy(), fontdict={'size': 20, 'color': 'red'})
        plt.title("Iteration: {}\nw0:{:.2f} w1:{:.2f} b: {:.2f} accuracy:{:.2%}".format(iteration, w0, w1, plot_b, acc))
        plt.legend()
        plt.show()
        plt.pause(0.5)
        if acc > 0.99:
            break
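Once training stops, the fitted model can score new points directly; a small usage sketch (the test sample below is made up for illustration):
with torch.no_grad():
    test_x = torch.tensor([[2.5, 2.5]])  # hypothetical point near the class-0 cluster
    prob = lr_net(test_x)                # predicted probability of class 1
    pred = prob.ge(0.5).int()
    print(prob.item(), pred.item())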