优化算法进阶
Momentum
def momentum_2d(x1, x2, v1, v2):
    """Perform one momentum update step on a 2-D test objective.

    Relies on the module-level hyperparameters ``eta`` (learning rate)
    and ``beta`` (momentum coefficient). The factors ``0.2 * x1`` and
    ``4 * x2`` look like the analytic partial derivatives of
    f(x1, x2) = 0.1 * x1**2 + 2 * x2**2 — confirm against ``f_2d``.

    Returns the updated position followed by the updated velocities:
    ``(x1_new, x2_new, v1_new, v2_new)``.
    """
    # Gradient components of the (presumed) quadratic objective.
    g1 = 0.2 * x1
    g2 = 4 * x2
    # Velocity: decayed previous velocity plus the scaled gradient.
    v1_next = beta * v1 + eta * g1
    v2_next = beta * v2 + eta * g2
    # Step against the velocity direction.
    return x1 - v1_next, x2 - v2_next, v1_next, v2_next
# Trace momentum descent on the 2-D objective for two learning rates.
eta, beta = 0.4, 0.5  # learning rate and momentum coefficient (read by momentum_2d)
d2l.show_trace_2d(f_2d, d2l.train_2d(momentum_2d))
# A larger learning rate: momentum still converges where plain GD would diverge
# (per the surrounding tutorial narrative — behavior depends on d2l helpers).
eta = 0.6
d2l.show_trace_2d(f_2d, d2l.train_2d(momentum_2d))
Exponential Moving Average
def get_data_ch7(data_path='/home/kesci/input/airfoil4755/airfoil_self_noise.dat',
                 n_samples=1500):
    """Load and column-standardize a tab-delimited regression dataset.

    Generalized from the original hard-coded loader: the file path and the
    sample cap are now parameters whose defaults reproduce the original
    behavior (NASA airfoil self-noise data, first 1500 rows).

    Parameters
    ----------
    data_path : str
        Path to a tab-delimited numeric file; the last column is the label.
    n_samples : int
        Maximum number of rows to keep (slice cap, so fewer rows are fine).

    Returns
    -------
    tuple(torch.Tensor, torch.Tensor)
        ``(features, labels)`` as float32 tensors; features has shape
        ``(min(n_rows, n_samples), n_cols - 1)``, labels is 1-D.
    """
    data = np.genfromtxt(data_path, delimiter='\t')
    # Standardize each column to zero mean / unit (population) std.
    data = (data - data.mean(axis=0)) / data.std(axis=0)
    return (torch.tensor(data[:n_samples, :-1], dtype=torch.float32),
            torch.tensor(data[:n_samples, -1], dtype=torch.float32))
features, labels = get_data_ch7()  # module-level dataset used by the training calls below
def init_momentum_states(num_features=None):
    """Create zero-initialized velocity buffers for SGD with momentum.

    Generalized: ``num_features`` may now be given explicitly; the default
    ``None`` preserves the original zero-argument behavior of reading the
    width from the module-level ``features`` tensor.

    Parameters
    ----------
    num_features : int, optional
        Number of input features (rows of the weight-velocity buffer).

    Returns
    -------
    tuple(torch.Tensor, torch.Tensor)
        ``(v_w, v_b)``: weight velocity of shape ``(num_features, 1)`` and
        bias velocity of shape ``(1,)``, both float32 zeros.
    """
    if num_features is None:
        num_features = features.shape[1]  # fall back to the global dataset width
    v_w = torch.zeros((num_features, 1), dtype=torch.float32)
    v_b = torch.zeros(1, dtype=torch.float32)
    return (v_w, v_b)
def sgd_momentum(params, states, hyperparams):
    """Update ``params`` in place using classic (heavy-ball) momentum.

    For each parameter ``p`` with velocity buffer ``v``:
        v <- momentum * v + lr * p.grad
        p <- p - v
    Both the parameters and the velocity states are mutated in place.

    Parameters
    ----------
    params : iterable of torch.Tensor
        Parameters whose ``.grad`` has already been populated.
    states : iterable of torch.Tensor
        Velocity buffers, one per parameter, same shapes.
    hyperparams : dict
        Must contain ``'lr'`` and ``'momentum'``.
    """
    lr = hyperparams['lr']
    mom = hyperparams['momentum']
    for param, vel in zip(params, states):
        # In-place velocity update: v = mom * v + lr * grad
        vel.data.mul_(mom).add_(lr * param.grad.data)
        # Descend along the accumulated velocity.
        param.data.sub_(vel.data)
# Train the from-scratch linear model with momentum SGD (lr=0.02, momentum=0.5)
# using the d2l training harness on the airfoil dataset.
d2l.train_ch7(sgd_momentum, init_momentum_states(),
{
'lr': 0.02, 'momentum': 0.5}, features, labels)
PyTorch class
# Same training run via PyTorch's built-in torch.optim.SGD, which takes
# the momentum coefficient directly (lr=0.004, momentum=0.9).
d2l.train_pytorch_ch7(torch.optim.SGD, {
'lr': 0.004, 'momentum': 0.9},
features, labels)
AdaGrad
%matplotlib inline
import math
import torch
import sys
sys.path.append("/home/kesci/input")
import