pytorch实现一个简单的神经网络
1.numpy和pytorch实现梯度下降法
- 设定初始值
- 求取梯度
- 在梯度方向上进行参数的更新
numpy
# Gradient descent on a toy quadratic, numpy/plain-Python style.
# BUG FIX: in the original, the three statements of the loop body were not
# indented, which raises an IndentationError; they now live inside the loop.

# starting point for the descent
x = 0
# learning rate (step size)
learning_rate = 0.1
# number of iterations
epochs = 20
# a simple lambda pretending to compute a loss: y = (x + 1)^2, minimum at x = -1
y = lambda x: x**2 + 2*x + 1

for epoch in range(epochs):
    dx = 2*x + 2                  # analytic gradient dy/dx
    x = x - learning_rate*dx      # update x in the (negative) gradient direction
    print('x:',x,'y:',y(x))
x: -0.2 y: 0.64
x: -0.36000000000000004 y: 0.40959999999999996
x: -0.488 y: 0.26214400000000004
x: -0.5904 y: 0.16777215999999995
x: -0.67232 y: 0.10737418239999996
x: -0.7378560000000001 y: 0.06871947673599998
x: -0.7902848 y: 0.043980465111040035
x: -0.83222784 y: 0.028147497671065613
x: -0.865782272 y: 0.018014398509481944
x: -0.8926258176 y: 0.011529215046068408
x: -0.9141006540800001 y: 0.0073786976294838436
x: -0.931280523264 y: 0.004722366482869611
x: -0.9450244186112 y: 0.0030223145490365644
x: -0.95601953488896 y: 0.0019342813113834012
x: -0.9648156279111679 y: 0.0012379400392853457
x: -0.9718525023289344 y: 0.0007922816251426656
x: -0.9774820018631475 y: 0.0005070602400912838
x: -0.981985601490518 y: 0.0003245185536584483
x: -0.9855884811924144 y: 0.00020769187434144243
x: -0.9884707849539315 y: 0.00013292279957843878
PyTorch
import torch
from torch.autograd import Variable
# Define an initial value as a PyTorch type that supports automatic differentiation.
# NOTE(review): Variable is deprecated since PyTorch 0.4 — a plain
# torch.tensor([0.], requires_grad=True) is the modern equivalent; confirm
# before modernizing, since later cells may rely on this exact setup.
x = torch.Tensor([0])# create a tensor, analogous to np.array
x = Variable(x,requires_grad=True) # wrap x in a Variable, the starting node of the computation graph; requires_grad=True enables automatic gradient computation
print('grad',x.grad,'data',x.data) # .grad holds the currently accumulated gradient (None until a backward pass); .data holds the raw tensor value
lr = 0.1
epochs = 20
for epoch