Reposted from:
https://zhuanlan.zhihu.com/p/27783097
https://www.jianshu.com/p/5d5d3957f684
Example: differentiating the ReLU function:
# -*- coding:utf8 -*-
import torch
from torch.autograd import Variable

class MyReLU(torch.autograd.Function):

    def forward(self, input_):
        # forward defines the forward computation of the MyReLU operation;
        # it can also save any values that will be needed in the backward pass
        self.save_for_backward(input_)  # save the input for use in backward
        output = input_.clamp(min=0)    # ReLU truncates negative values, setting them to 0
        return output

    def backward(self, grad_output):
        # By the chain rule of backpropagation, dloss/dx = (dloss/doutput) * (doutput/dx).
        # dloss/doutput is the incoming argument grad_output,
        # so we only need the derivative of ReLU, then multiply it by grad_output.
        input_, = self.saved_tensors
        grad_input = grad_output.clone()
        grad_input[input_ < 0] = 0  # this is exactly the expression above: in the backward pass
                                    # ReLU acts as a gating function, zeroing the gradient of
                                    # every unit below the threshold (activation < 0)
        return grad_input

# Wrap ReLU into a function
# The custom ReLU class defined above can be wrapped as a plain function for convenient calling
def relu(input_):
    # MyReLU() creates a MyReLU object.
    # The Function class uses Python's __call__ protocol, so the object itself can be called,
    # and __call__ dispatches to forward; hence MyReLU()(input_) below is equivalent to
    # return MyReLU().forward(input_)
    return MyReLU()(input_)

input_ = Variable(torch.linspace(-3, 3, steps=5))
print(input_)
print(relu(input_))

# input_ = Variable(torch.randn(1))
# relu = MyReLU()
# output_ = relu(input_)
#
# # This relu object is output_.creator; it links output_ to input_, forming a computation graph
# print(relu)
# print(output_.creator)
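To see the custom backward in action, here is a minimal sketch (my addition, using the same legacy Variable API as the code above; the seed gradient of ones stands in for dloss/doutput):

x = Variable(torch.linspace(-3, 3, steps=5), requires_grad=True)
y = relu(x)
y.backward(torch.ones(y.size()))   # seed gradient of all ones, i.e. dloss/doutput = 1
print(x.grad)                      # 0 for inputs below 0, 1 for the rest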
The new-style (static method) API, as used in the PyTorch library:
import math
import torch
import torch.nn as nn
from torch.autograd import Function

class ReLUF(Function):

    @staticmethod
    def forward(cxt, input):
        cxt.save_for_backward(input)
        output = input.clamp(min=0)
        return output

    @staticmethod
    def backward(cxt, grad_output):
        input, = cxt.saved_variables
        grad_input = grad_output.clone()
        grad_input[input < 0] = 0
        return grad_input

class LinearF(Function):

    @staticmethod
    def forward(cxt, input, weight, bias=None):
        cxt.save_for_backward(input, weight, bias)
        output = input.mm(weight.t())
        if bias is not None:
            output += bias
        return output

    @staticmethod
    def backward(cxt, grad_output):
        input, weight, bias = cxt.saved_variables
        grad_input = grad_weight = grad_bias = None
        # needs_input_grad marks which forward inputs require gradients,
        # so unnecessary gradient computations can be skipped
        if cxt.needs_input_grad[0]:
            grad_input = grad_output.mm(weight)
        if cxt.needs_input_grad[1]:
            grad_weight = grad_output.t().mm(input)
        if bias is not None and cxt.needs_input_grad[2]:
            grad_bias = grad_output.sum(0).squeeze(0)
        if bias is not None:
            return grad_input, grad_weight, grad_bias
        else:
            return grad_input, grad_weight

# aliases
relu = ReLUF.apply
linear = LinearF.apply

# simple test
if __name__ == "__main__":
    from torch.autograd import Variable

    torch.manual_seed(1111)
    a = torch.randn(2, 3)

    va = Variable(a, requires_grad=True)
    vb = relu(va)
    print(va.data, vb.data)

    vb.backward(torch.ones(va.size()))
    print(va.grad.data)
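A standard way to verify a hand-written backward is torch.autograd.gradcheck, which compares the analytical gradients against numerically estimated ones. A minimal sketch for LinearF (my addition; gradcheck needs double-precision inputs with requires_grad=True):

from torch.autograd import gradcheck

x = Variable(torch.randn(4, 5).double(), requires_grad=True)
w = Variable(torch.randn(3, 5).double(), requires_grad=True)
# returns True if analytical and numerical gradients match within tolerance
print(gradcheck(LinearF.apply, (x, w), eps=1e-6, atol=1e-4))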
Explanation in the PyTorch Chinese documentation:
https://pytorch-cn.readthedocs.io/zh/latest/notes/extending/
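The extending note linked above also covers wrapping such a Function in an nn.Module with learnable Parameters. A minimal sketch along those lines (my own illustration; the module name and shapes are assumptions, and it reuses LinearF from the listing above):

import torch
import torch.nn as nn

class Linear(nn.Module):
    def __init__(self, in_features, out_features):
        super(Linear, self).__init__()
        self.weight = nn.Parameter(torch.randn(out_features, in_features))
        self.bias = nn.Parameter(torch.zeros(out_features))

    def forward(self, input):
        # call the custom autograd Function defined earlier
        return LinearF.apply(input, self.weight, self.bias)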
Other blog posts on the topic:
https://blog.youkuaiyun.com/Hungryof/article/details/78346304
https://blog.youkuaiyun.com/u012436149/article/details/78829329
https://blog.youkuaiyun.com/tsq292978891/article/details/79364140