1.3 Hands-On Stochastic Gradient Descent (SGD)

1. Build the dataset

Ground-truth model: y = 2 * x

import numpy as np
X = np.arange(0,50)
X
array([ 0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16,
       17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33,
       34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49])
# Create noise values in the range [-5, 5]
np.random.seed(1)  # fix the seed so the same random values are drawn every run
RandomArray = (np.random.random(50)*2-1)*5
RandomArray
array([-0.82977995,  2.20324493, -4.99885625, -1.97667427, -3.53244109,
       -4.07661405, -3.13739789, -1.54439273, -1.03232526,  0.38816734,
       -0.80805486,  1.852195  , -2.9554775 ,  3.78117436, -4.72612407,
        1.7046751 , -0.82695198,  0.58689828, -3.59613061, -3.01898511,
        3.00744569,  4.68261576, -1.86575822,  1.92322616,  3.76389152,
        3.94606664, -4.14955789, -4.60945217, -3.3016958 ,  3.78142503,
       -4.01653166, -0.78892375,  4.5788953 ,  0.33165285,  1.91877114,
       -1.84484369,  1.86500928,  3.34625672, -4.81711723,  2.50144315,
        4.88861089,  2.48165654, -2.19556008,  2.89279328, -3.96773993,
       -0.52106474,  4.08595503, -2.06385852, -2.12224661, -3.69971428])
y = 2 * X + RandomArray  # add the noise values to y
y
array([-0.82977995,  4.20324493, -0.99885625,  4.02332573,  4.46755891,
        5.92338595,  8.86260211, 12.45560727, 14.96767474, 18.38816734,
       19.19194514, 23.852195  , 21.0445225 , 29.78117436, 23.27387593,
       31.7046751 , 31.17304802, 34.58689828, 32.40386939, 34.98101489,
       43.00744569, 46.68261576, 42.13424178, 47.92322616, 51.76389152,
       53.94606664, 47.85044211, 49.39054783, 52.6983042 , 61.78142503,
       55.98346834, 61.21107625, 68.5788953 , 66.33165285, 69.91877114,
       68.15515631, 73.86500928, 77.34625672, 71.18288277, 80.50144315,
       84.88861089, 84.48165654, 81.80443992, 88.89279328, 84.03226007,
       89.47893526, 96.08595503, 91.93614148, 93.87775339, 94.30028572])
import matplotlib.pyplot as plt
plt.scatter(X,y)
<matplotlib.collections.PathCollection at 0x1ecd89ffe08>

X.shape,y.shape
((50,), (50,))
X = X.reshape(50,1)
y = y.reshape(50,1)
All_data = np.concatenate((X,y),axis=1)   # concatenate the two arrays column-wise
All_data
array([[ 0.        , -0.82977995],
       [ 1.        ,  4.20324493],
       [ 2.        , -0.99885625],
       [ 3.        ,  4.02332573],
       [ 4.        ,  4.46755891],
       [ 5.        ,  5.92338595],
       [ 6.        ,  8.86260211],
       [ 7.        , 12.45560727],
       [ 8.        , 14.96767474],
       [ 9.        , 18.38816734],
       [10.        , 19.19194514],
       [11.        , 23.852195  ],
       [12.        , 21.0445225 ],
       [13.        , 29.78117436],
       [14.        , 23.27387593],
       [15.        , 31.7046751 ],
       [16.        , 31.17304802],
       [17.        , 34.58689828],
       [18.        , 32.40386939],
       [19.        , 34.98101489],
       [20.        , 43.00744569],
       [21.        , 46.68261576],
       [22.        , 42.13424178],
       [23.        , 47.92322616],
       [24.        , 51.76389152],
       [25.        , 53.94606664],
       [26.        , 47.85044211],
       [27.        , 49.39054783],
       [28.        , 52.6983042 ],
       [29.        , 61.78142503],
       [30.        , 55.98346834],
       [31.        , 61.21107625],
       [32.        , 68.5788953 ],
       [33.        , 66.33165285],
       [34.        , 69.91877114],
       [35.        , 68.15515631],
       [36.        , 73.86500928],
       [37.        , 77.34625672],
       [38.        , 71.18288277],
       [39.        , 80.50144315],
       [40.        , 84.88861089],
       [41.        , 84.48165654],
       [42.        , 81.80443992],
       [43.        , 88.89279328],
       [44.        , 84.03226007],
       [45.        , 89.47893526],
       [46.        , 96.08595503],
       [47.        , 91.93614148],
       [48.        , 93.87775339],
       [49.        , 94.30028572]])

Split the samples into a training set D and a test set V

Split ratio 4:1

np.random.shuffle(All_data)  # shuffle All_data in place
All_data
array([[ 5.        ,  5.92338595],
       [29.        , 61.78142503],
       [ 9.        , 18.38816734],
       [22.        , 42.13424178],
       [33.        , 66.33165285],
       [16.        , 31.17304802],
       [49.        , 94.30028572],
       [35.        , 68.15515631],
       [31.        , 61.21107625],
       [36.        , 73.86500928],
       [18.        , 32.40386939],
       [14.        , 23.27387593],
       [ 4.        ,  4.46755891],
       [41.        , 84.48165654],
       [27.        , 49.39054783],
       [48.        , 93.87775339],
       [46.        , 96.08595503],
       [26.        , 47.85044211],
       [47.        , 91.93614148],
       [11.        , 23.852195  ],
       [12.        , 21.0445225 ],
       [ 2.        , -0.99885625],
       [39.        , 80.50144315],
       [21.        , 46.68261576],
       [20.        , 43.00744569],
       [ 6.        ,  8.86260211],
       [30.        , 55.98346834],
       [44.        , 84.03226007],
       [ 0.        , -0.82977995],
       [ 3.        ,  4.02332573],
       [13.        , 29.78117436],
       [17.        , 34.58689828],
       [ 1.        ,  4.20324493],
       [40.        , 84.88861089],
       [24.        , 51.76389152],
       [45.        , 89.47893526],
       [43.        , 88.89279328],
       [28.        , 52.6983042 ],
       [ 7.        , 12.45560727],
       [25.        , 53.94606664],
       [42.        , 81.80443992],
       [15.        , 31.7046751 ],
       [23.        , 47.92322616],
       [10.        , 19.19194514],
       [34.        , 69.91877114],
       [32.        , 68.5788953 ],
       [ 8.        , 14.96767474],
       [38.        , 71.18288277],
       [19.        , 34.98101489],
       [37.        , 77.34625672]])
train_data = All_data[:40]
test_data = All_data[40:]
train_data,test_data
(array([[ 5.        ,  5.92338595],
        [29.        , 61.78142503],
        [ 9.        , 18.38816734],
        [22.        , 42.13424178],
        [33.        , 66.33165285],
        [16.        , 31.17304802],
        [49.        , 94.30028572],
        [35.        , 68.15515631],
        [31.        , 61.21107625],
        [36.        , 73.86500928],
        [18.        , 32.40386939],
        [14.        , 23.27387593],
        [ 4.        ,  4.46755891],
        [41.        , 84.48165654],
        [27.        , 49.39054783],
        [48.        , 93.87775339],
        [46.        , 96.08595503],
        [26.        , 47.85044211],
        [47.        , 91.93614148],
        [11.        , 23.852195  ],
        [12.        , 21.0445225 ],
        [ 2.        , -0.99885625],
        [39.        , 80.50144315],
        [21.        , 46.68261576],
        [20.        , 43.00744569],
        [ 6.        ,  8.86260211],
        [30.        , 55.98346834],
        [44.        , 84.03226007],
        [ 0.        , -0.82977995],
        [ 3.        ,  4.02332573],
        [13.        , 29.78117436],
        [17.        , 34.58689828],
        [ 1.        ,  4.20324493],
        [40.        , 84.88861089],
        [24.        , 51.76389152],
        [45.        , 89.47893526],
        [43.        , 88.89279328],
        [28.        , 52.6983042 ],
        [ 7.        , 12.45560727],
        [25.        , 53.94606664]]),
 array([[42.        , 81.80443992],
        [15.        , 31.7046751 ],
        [23.        , 47.92322616],
        [10.        , 19.19194514],
        [34.        , 69.91877114],
        [32.        , 68.5788953 ],
        [ 8.        , 14.96767474],
        [38.        , 71.18288277],
        [19.        , 34.98101489],
        [37.        , 77.34625672]]))
np.sum(All_data[:,0] * 2 - All_data[:,1])  # sanity check: equals the negated sum of the added noise, so it should be small relative to y
16.51220018240119

SGD (stochastic gradient descent) implementation

[Figure: the SGD algorithm]
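The update rule implemented below (reconstructed from the code, since the original figure is unavailable): for the one-parameter model y_hat = theta * x, each randomly drawn sample (x, y) contributes

loss(theta) = 0.5 * (theta * x - y)^2
grad = x * (theta * x - y)

and SGD updates the parameter with theta <- theta - lr * grad, where lr is the learning rate.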

# Hyperparameters
lr = 0.001    # learning rate
N = 100       # number of SGD updates per epoch
epsilon = 200 # loss threshold used as the stopping criterion
# Parameter to learn
theta = np.random.rand()
theta
0.14198075923882703
Num = 1
theta_list = []
loss_list = []
while True:
    # Reshuffle the training set D
    np.random.shuffle(train_data)
    for n in range(N):
        # Draw a random sample
        randint = np.random.randint(0,40)
        rand_x = train_data[randint][0]
        rand_y = train_data[randint][1]
        # Compute the gradient for this sample
        grad = rand_x * (rand_x * theta - rand_y)
        # Update the parameter theta
        theta = theta - lr * grad
    # Compute the training loss with the updated theta
    X = train_data[:,0]
    y = train_data[:,1]
    loss = np.sum(0.5*(theta*X-y)**2)
    print("Number: %d , theta: %f loss: %f"%(Num,theta,loss))
    Num = Num + 1
    theta_list.append(theta)
    loss_list.append(loss)
    if loss < epsilon:
        break
Number: 1 , theta: 1.932683 loss: 262.685475
Number: 2 , theta: 2.214640 loss: 959.517237
Number: 3 , theta: 1.624445 loss: 2447.005804
Number: 4 , theta: 2.011349 loss: 198.118053
# Plot the results
plt.plot(range(len(theta_list)),theta_list)
[<matplotlib.lines.Line2D at 0x1ecd8bd16c8>]

plt.plot(range(len(loss_list)),loss_list)
[<matplotlib.lines.Line2D at 0x1ecd8cf5a08>]

[Figure: loss per epoch]
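The test set V split off earlier is not used above; a minimal sketch of checking the learned theta on it (the names test_X, test_y, test_loss are ours, for illustration):

test_X = test_data[:, 0]
test_y = test_data[:, 1]
test_loss = np.sum(0.5 * (theta * test_X - test_y) ** 2)  # same loss as in training, on held-out data
print("theta: %f, test loss: %f" % (theta, test_loss))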

# Exercise: batch gradient descent and mini-batch stochastic gradient descent
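As a starting point for the exercise, a minimal mini-batch gradient descent sketch on the same one-parameter model, reusing lr, epsilon and train_data from above; batch_size and the epoch cap are assumed, illustrative values. Batch gradient descent is the special case where the batch is the entire training set.

batch_size = 8
theta_mb = np.random.rand()
for epoch in range(100):
    np.random.shuffle(train_data)
    for start in range(0, len(train_data), batch_size):
        batch = train_data[start:start + batch_size]
        bx, by = batch[:, 0], batch[:, 1]
        # average the per-sample gradients over the mini-batch
        grad = np.mean(bx * (bx * theta_mb - by))
        theta_mb = theta_mb - lr * grad
    loss = np.sum(0.5 * (theta_mb * train_data[:, 0] - train_data[:, 1]) ** 2)
    if loss < epsilon:
        break
print("theta: %f, loss: %f" % (theta_mb, loss))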