Introduction
Once a neural network architecture has been designed, the way its weights are initialized can strongly affect both the training process and the final result. Options range from loading ImageNet-pretrained parameters (sketched below) to the kaiming_uniform scheme and a number of other initialization methods. The layers that need parameter initialization include Linear, Conv, and BN.
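A minimal sketch of the pretrained-parameter option mentioned above, assuming torchvision is available (torchvision and ResNet-18 are illustrative choices, not part of the original text):
import torchvision.models as models

# Load ResNet-18 with ImageNet-pretrained weights instead of random initialization.
model = models.resnet18(pretrained=True)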
Parameter initialization functions
1. No explicit initialization: the framework's default initialization
# Conv{1,2,3}d all inherit from _ConvNd; its default parameter initialization is:
def reset_parameters(self):
    n = self.in_channels
    for k in self.kernel_size:
        n *= k
    stdv = 1. / math.sqrt(n)
    self.weight.data.uniform_(-stdv, stdv)
    if self.bias is not None:
        self.bias.data.uniform_(-stdv, stdv)
# Linear
def reset_parameters(self):
    stdv = 1. / math.sqrt(self.weight.size(1))
    self.weight.data.uniform_(-stdv, stdv)
    if self.bias is not None:
        self.bias.data.uniform_(-stdv, stdv)
# BN{1,2,3}d all inherit from _BatchNorm; its default parameter initialization is:
def reset_parameters(self):
    self.reset_running_stats()
    if self.affine:
        self.weight.data.uniform_()
        self.bias.data.zero_()
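Since reset_parameters is called from each module's constructor, simply instantiating a layer already applies this default initialization; a quick check (illustrative only):
import torch.nn as nn

conv = nn.Conv2d(3, 16, kernel_size=3)  # reset_parameters runs inside __init__
# Here n = 3 * 3 * 3 = 27, so weights lie in (-1/sqrt(27), 1/sqrt(27)) ≈ (-0.192, 0.192)
print(conv.weight.min().item(), conv.weight.max().item())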
2. torch.nn.init.uniform_(tensor, a=0, b=1)
Fills the Tensor with values drawn from the uniform distribution U(a, b): every value in the range [a, b) is equally likely, and the mean is (a + b) / 2.
w = torch.empty(2, 3)
nn.init.uniform_(w)
print(w)
3. torch.nn.init.normal_(tensor, mean=0, std=1)
The Gaussian distribution is also known as the normal distribution.
If a random variable X follows a normal distribution with mean μ and variance σ², we write X ~ N(μ, σ²). Its probability density function is f(x) = 1/(σ√(2π)) · exp(−(x − μ)²/(2σ²)); the mean μ determines the location of the curve and the standard deviation σ determines its spread. When μ = 0 and σ = 1, this is the standard normal distribution.
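For symmetry with the other entries, a short example (the sampled values differ on every run):
w = torch.empty(2, 3)
nn.init.normal_(w, mean=0.0, std=1.0)
print(w)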
4. torch.nn.init.constant_(tensor, val)
Fills the Tensor with the constant value val.
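A short example:
w = torch.empty(2, 3)
nn.init.constant_(w, 0.5)
print(w)  # every entry is 0.5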
5. torch.nn.init.xavier_uniform_(tensor, gain=1)
Fills the Tensor using Xavier (Glorot) uniform initialization: values are sampled from U(−a, a) with a = gain × √(6 / (fan_in + fan_out)).
w = torch.empty(2,3)
nn.init.xavier_uniform_(w, gain=nn.init.calculate_gain('relu'))
print(w)
tensor([[-0.4091, -1.1049, -0.6557],
[-1.0230, -0.4674, -0.4145]])
6. torch.nn.init.xavier_normal_(tensor, gain=1)
Fills the Tensor using Xavier (Glorot) normal initialization: values are sampled from N(0, std²) with std = gain × √(2 / (fan_in + fan_out)).
w = torch.empty(2,3)
nn.init.xavier_normal_(w)
print(w)
tensor([[ 1.1797, -0.7723, -1.3113],
[ 0.3550, -0.3806, -0.5848]])
7. torch.nn.init.kaiming_uniform_(tensor, a=0, mode='fan_in', nonlinearity='leaky_relu')
Fills the Tensor using Kaiming (He) uniform initialization: values are sampled from U(−bound, bound) with bound = gain × √(3 / fan_mode), where gain depends on the nonlinearity.
w = torch.empty(2,3)
nn.init.kaiming_uniform_(w, mode='fan_in', nonlinearity='relu')
print(w)
8. torch.nn.init.kaiming_normal_(tensor, a=0, mode='fan_in', nonlinearity='leaky_relu')
Fills the Tensor using Kaiming (He) normal initialization: values are sampled from N(0, std²) with std = gain / √(fan_mode).
w = torch.empty(2,3)
nn.init.kaiming_normal_(w, mode='fan_out', nonlinearity='relu')
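The mode argument decides which fan count sets the scale: 'fan_in' preserves the variance of activations in the forward pass, while 'fan_out' preserves the variance of gradients in the backward pass. A small sketch on a non-square tensor makes the difference visible (the std values shown are approximate):
w = torch.empty(100, 10)  # fan_in = 10, fan_out = 100
nn.init.kaiming_normal_(w, mode='fan_in', nonlinearity='relu')
print(w.std())   # ≈ sqrt(2 / 10) ≈ 0.45
nn.init.kaiming_normal_(w, mode='fan_out', nonlinearity='relu')
print(w.std())   # ≈ sqrt(2 / 100) ≈ 0.14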
Applying initialization to a network
1. Weight initialization with a for loop
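The snippets in this section reference a network class MyNet that the original post does not define; a minimal placeholder (a hypothetical small conv net, used only so the examples run) could be:
import torch.nn as nn

class MyNet(nn.Module):  # hypothetical example network
    def __init__(self):
        super().__init__()
        self.conv = nn.Conv2d(3, 16, 3)
        self.bn = nn.BatchNorm2d(16)

    def forward(self, x):
        return self.bn(self.conv(x))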
net = MyNet()
for m in net.modules():
    if isinstance(m, nn.Conv2d):
        nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
    elif isinstance(m, nn.BatchNorm2d):
        nn.init.constant_(m.weight, 1)
        nn.init.constant_(m.bias, 0)
2. Weight initialization with apply
The apply function recursively visits every module inside the network and applies the function passed as its argument to each of them.
from torch import nn
from torch.nn import init

def weights_init_kaiming(m):
    classname = m.__class__.__name__
    if classname.find('Conv') != -1:
        # He initialization for conv and linear weights
        init.kaiming_normal_(m.weight.data, a=0, mode='fan_in')
    elif classname.find('Linear') != -1:
        init.kaiming_normal_(m.weight.data, a=0, mode='fan_out')
        init.constant_(m.bias.data, 0.0)
    elif classname.find('BatchNorm1d') != -1:
        init.normal_(m.weight.data, 1.0, 0.02)
        init.constant_(m.bias.data, 0.0)
def weights_init(m):
    if isinstance(m, nn.Conv2d):
        init.xavier_uniform_(m.weight.data)
        # Xavier needs at least 2 dimensions, so the 1-D bias gets a constant instead.
        init.constant_(m.bias.data, 0.0)
net = MyNet()
net.apply(weights_init_kaiming)
net.apply(weights_init)  # this second pass overwrites the Conv weights set above
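To verify that apply reached the modules, one can print the resulting statistics afterwards (a quick sanity check, not part of the original post):
for m in net.modules():
    if isinstance(m, nn.Conv2d):
        # Xavier-initialized weights should have mean ≈ 0
        print(m.weight.mean().item(), m.weight.std().item())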
Reference:
https://blog.youkuaiyun.com/u013978977/article/details/84861453