[Figure: learning paths on the test function; panel titles: optimizer=SGD(0.88) and optimizer=AdaGrad(0.88)]
As the figures show, the AdaGrad method produces a noticeably smoother learning path than SGD and reaches the minimum of the function more quickly.
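For reference, here is a minimal, self-contained sketch of the two update rules being compared. The class names SGD and AdaGrad, the update(params, grads) interface, and the dictionary-of-arrays parameter layout are illustrative assumptions rather than code from this post. The key point is that AdaGrad divides each step by the square root of the accumulated squared gradients, which damps the zig-zag behaviour seen with plain SGD.

import numpy as np

class SGD:
    """Plain gradient descent: one learning rate scales every parameter's step."""
    def __init__(self, lr=0.01):
        self.lr = lr

    def update(self, params, grads):
        for key in params:
            params[key] -= self.lr * grads[key]

class AdaGrad:
    """AdaGrad: accumulate squared gradients in h and scale each step by 1/sqrt(h),
    so directions with repeatedly large gradients take ever-smaller steps."""
    def __init__(self, lr=0.01):
        self.lr = lr
        self.h = None  # per-parameter sum of squared gradients

    def update(self, params, grads):
        if self.h is None:
            self.h = {key: np.zeros_like(val) for key, val in params.items()}
        for key in params:
            self.h[key] += grads[key] * grads[key]
            params[key] -= self.lr * grads[key] / (np.sqrt(self.h[key]) + 1e-7)

The original script below wraps the 2-D test function whose numerical gradient such optimizers follow.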
# -*- coding: utf-8 -*-
"""
Created on Tue Jun 25 09:04:14 2019
@author: Administrator
"""
import sys, os
sys.path.append(os.pardir)  # make the parent directory's common/ package importable
import numpy as np
from common.gradient import numerical_gradient
import matplotlib.pylab as plt


class simple:
    """Wraps the 2-D point (x, y) whose coordinates the optimizers will update."""
    def __init__(self, x, y):
        self.x = x
        self.y = y

    def f(self):
        # The source line is truncated here ("z=np.sum((s..."); the completion below
        # assumes the usual bowl-shaped test function f(x, y) = x**2/20 + y**2
        # commonly used for comparing SGD and AdaGrad learning paths.
        z = np.sum((self.x)**2 / 20.0 + (self.y)**2)
        return z