2 - Neural Network Origins - demo2 - Backpropagation Implementation for GRE Admissions Data

This post walks through a neural network trained with gradient descent to predict graduate-school admission. The data contains three features (GRE score, GPA, and undergraduate school rank). After feature engineering, the network is trained with both batch gradient descent and stochastic gradient descent, and the prediction accuracy of each model is evaluated at the end.

1. Code

"""
Example: graduate-school admission data; train a network with gradient descent.
The data has 3 features: GRE score, GPA, and the undergraduate school rank (from 1 to 4, where 1 is the best and 4 is the worst).
"""


import numpy as np
import pandas as pd


admissions = pd.read_csv(filepath_or_buffer='../datas/11.csv')


def data_explore(admissions):
    print(admissions.head(10))
    print(admissions.info())
    print(admissions.describe())
    print(admissions['admit'].value_counts())

"""
Data preprocessing:
    1. Dummy-encode the categorical variable (rank).
    2. Standardize the continuous variables (gre, gpa).
"""


def data_transform(admissions):
    # 1. Dummy-encode the categorical variable (rank)
    data = pd.concat([admissions, pd.get_dummies(admissions['rank'], prefix='rank')], axis=1)
    data = data.drop('rank', axis=1)

    # 2. Standardize gre and gpa (remove the effect of different scales)
    # FIXME: the standard practice is to split the dataset first, then use the training-set
    # statistics to standardize the validation and test sets (a leakage-free sketch follows this function).
    for field in ['gre', 'gpa']:
        mean, std = data[field].mean(), data[field].std()
        data.loc[:, field] = (data[field] - mean) / std
    # print(data)
    # 3. Split the dataset into training and test sets
    np.random.seed(42)
    sample = np.random.choice(data.index, size=int(len(data) * 0.9), replace=False)
    train_data, test_data = data.iloc[sample], data.drop(sample)

    # 4. Split off features and targets
    features_train, targets_train = train_data.drop('admit', axis=1), train_data['admit']
    features_test, targets_test = test_data.drop('admit', axis=1), test_data['admit']

    return features_train, targets_train, features_test, targets_test
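

# A minimal leakage-free variant of the standardization above (an illustrative sketch,
# not part of the original code; the helper name data_transform_no_leakage is made up):
# split first, then standardize the test set with the training-set statistics.
def data_transform_no_leakage(admissions):
    data = pd.concat([admissions, pd.get_dummies(admissions['rank'], prefix='rank')], axis=1)
    data = data.drop('rank', axis=1)

    # Split BEFORE computing any statistics
    np.random.seed(42)
    sample = np.random.choice(data.index, size=int(len(data) * 0.9), replace=False)
    train_data, test_data = data.loc[sample].copy(), data.drop(sample).copy()

    # Use the training-set mean/std only, and apply the same numbers to the test set
    for field in ['gre', 'gpa']:
        mean, std = train_data[field].mean(), train_data[field].std()
        train_data.loc[:, field] = (train_data[field] - mean) / std
        test_data.loc[:, field] = (test_data[field] - mean) / std

    features_train, targets_train = train_data.drop('admit', axis=1), train_data['admit']
    features_test, targets_test = test_data.drop('admit', axis=1), test_data['admit']
    return features_train, targets_train, features_test, targets_test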


# ***************************************
def sigmoid(x):
    return 1/(1 + np.exp(-x))
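

# Added note (not in the original code): the error terms in the backpropagation below rely on
# the sigmoid derivative, sigmoid'(x) = sigmoid(x) * (1 - sigmoid(x)), which is why the code
# multiplies by output * (1 - output) and hidden_output * (1 - hidden_output).
def sigmoid_prime(x):
    # Illustrative helper only; the training functions below inline this expression
    s = sigmoid(x)
    return s * (1 - s)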


# NOTE: although this is a classification task, MSE is used as the loss function here (a cross-entropy note follows this function)
def gre_bp_work(features, targets, features_test, targets_test):
    # Hyperparameters (a single hidden layer)
    n_hidden = 2  # number of hidden-layer nodes
    epochs = 2000
    learnrate = 0.06

    # Number of samples (n_records) and number of features (n_features)
    n_records, n_features = features.shape
    last_loss = None
    # Initialize the weights
    # NOTE: there are two weight matrices to learn, so both need to be initialized
    weights_input_hidden = np.random.normal(
        scale=1 / n_features ** .5, size=(n_features, n_hidden)
    )
    weights_hidden_output = np.random.normal(
        scale=1 / n_hidden ** .5, size=n_hidden
    )

    for e in range(1, epochs):
        # Two gradient accumulators (del_w), initialized to zero
        del_w_input_hidden = np.zeros(weights_input_hidden.shape)
        del_w_hidden_output = np.zeros(weights_hidden_output.shape)
        # Iterate over the samples one at a time
        for x, y in zip(features.values, targets):
            ## Forward pass ##
            # Compute the network output
            hidden_input = np.matmul(x, weights_input_hidden)
            hidden_output = sigmoid(hidden_input)
            output = sigmoid(np.matmul(hidden_output, weights_hidden_output))

            ## Backward pass ##
            # Compute the prediction error
            error = output - y  # scalar

            # Error term of the output layer
            output_error_term = error * output * (1-output)  # scalar

            # Back-propagate the error to the hidden layer
            # Contribution of the hidden layer to the output error
            hidden_error = output_error_term * weights_hidden_output  # vector, shape (n_hidden,)

            # Error term of the hidden layer
            hidden_error_term = hidden_error * hidden_output * (1-hidden_output)

            # Accumulate the weight changes (gradients)
            del_w_hidden_output += hidden_output * output_error_term  # vector, shape (n_hidden,)
            del_w_input_hidden += x[:, None] * hidden_error_term  # x[:, None] is an (n_features, 1) column vector

        # Update the weights once per epoch with the averaged gradients (batch gradient descent)
        weights_input_hidden -= del_w_input_hidden * learnrate / n_records
        weights_hidden_output -= del_w_hidden_output * learnrate / n_records

        # Print the mean squared error every 20 epochs
        if e % 20 == 0:
            # Forward pass over the whole training set
            hidden_output = sigmoid(np.dot(features, weights_input_hidden))
            out = sigmoid(np.dot(hidden_output,
                                 weights_hidden_output))
            loss = np.mean((out - targets) ** 2)

            if last_loss and last_loss < loss:
                print("Train loss: ", loss, "  WARNING - Loss Increasing")
            else:
                print("Train loss: ", loss)
            last_loss = loss

    # Compute the accuracy on the test set
    hidden = sigmoid(np.dot(features_test, weights_input_hidden))
    out = sigmoid(np.dot(hidden, weights_hidden_output))
    predictions = out > 0.5
    accuracy = np.mean(predictions == targets_test)
    print("Prediction accuracy: {:.3f}".format(accuracy))


# NOTE: although this is a classification task, MSE is used as the loss function here as well
def gre_bp_with_SGD(features, targets, features_test, targets_test):
    # Hyperparameters (a single hidden layer)
    n_hidden = 6  # number of hidden-layer nodes
    epochs = 2000
    learnrate = 0.06

    # Number of samples (n_records) and number of features (n_features)
    n_records, n_features = features.shape
    last_loss = None
    # Initialize the weights
    # NOTE: there are two weight matrices to learn, so both need to be initialized
    weights_input_hidden = np.random.normal(
        scale=1 / n_features ** .5, size=(n_features, n_hidden)
    )
    weights_hidden_output = np.random.normal(
        scale=1 / n_hidden ** .5, size=n_hidden
    )

    for e in range(1, epochs):
        # Unlike the batch version, SGD needs no gradient accumulators:
        # del_w_input_hidden = np.zeros(weights_input_hidden.shape)
        # del_w_hidden_output = np.zeros(weights_hidden_output.shape)
        # Iterate over the samples one at a time, updating the weights after every sample
        for x, y in zip(features.values, targets):
            ## Forward pass ##
            # Compute the network output
            hidden_input = np.matmul(x, weights_input_hidden)
            hidden_output = sigmoid(hidden_input)
            output = sigmoid(np.matmul(hidden_output, weights_hidden_output))

            ## Backward pass ##
            # Compute the prediction error
            error = output - y  # scalar

            # Error term of the output layer
            output_error_term = error * output * (1-output)  # scalar

            # Back-propagate the error to the hidden layer
            # Contribution of the hidden layer to the output error
            hidden_error = output_error_term * weights_hidden_output  # vector, shape (n_hidden,)

            # Error term of the hidden layer
            hidden_error_term = hidden_error * hidden_output * (1-hidden_output)
            # Update the weights immediately after every sample (stochastic gradient descent)
            weights_input_hidden -= x[:, None] * hidden_error_term * learnrate
            weights_hidden_output -= hidden_output * output_error_term * learnrate

        # Print the mean squared error every 20 epochs
        if e % 20 == 0:
            # Forward pass over the whole training set
            hidden_output = sigmoid(np.dot(features, weights_input_hidden))
            out = sigmoid(np.dot(hidden_output,
                                 weights_hidden_output))
            loss = np.mean((out - targets) ** 2)

            if last_loss and last_loss < loss:
                print('Epoch:{}'.format(e))
                print("Train loss: ", loss, "  WARNING - Loss Increasing")
            else:
                print('Epoch:{}'.format(e))
                print("Train loss: ", loss)
            last_loss = loss

    # Compute the accuracy on the test set
    hidden = sigmoid(np.dot(features_test, weights_input_hidden))
    out = sigmoid(np.dot(hidden, weights_hidden_output))
    predictions = out > 0.5
    accuracy = np.mean(predictions == targets_test)
    print("Prediction accuracy: {:.3f}".format(accuracy))


if __name__ == '__main__':
    # data_explore(admissions)
    features_train, targets_train, features_test, targets_test = data_transform(admissions)
    gre_bp_work(features_train, targets_train, features_test, targets_test)
    #gre_bp_with_SGD(features_train, targets_train, features_test, targets_test)


2. Output

2.1 data_explore(admissions)

if __name__ == '__main__':
    data_explore(admissions)
    # features_train, targets_train, features_test, targets_test = data_transform(admissions)
    # # gre_bp_work(features_train, targets_train, features_test, targets_test)
    # gre_bp_with_SGD(features_train, targets_train, features_test, targets_test)
C:\Anaconda3\python.exe D:/AI/04-深度学习/1-深度学习入门/demo/03-GRE反向传播案例/02_GRE的反向传播实现.py
   admit  gre   gpa  rank
0      0  380  3.61     3
1      1  660  3.67     3
2      1  800  0.04     1
3      1  640  3.19     4
4      0  520  2.93     4
5      1  760  0.03     2
6      1  560  2.98     1
7      0  400  3.08     2
8      1  540  3.39     3
9      0  700  3.92     2
<class 'pandas.core.frame.DataFrame'>
RangeIndex: 400 entries, 0 to 399
Data columns (total 4 columns):
admit    400 non-null int64
gre      400 non-null int64
gpa      400 non-null float64
rank     400 non-null int64
dtypes: float64(1), int64(3)
memory usage: 12.6 KB
None
            admit         gre         gpa       rank
count  400.000000  400.000000  400.000000  400.00000
mean     0.317500  587.700000    3.083000    2.48500
std      0.466087  115.516536    0.960953    0.94446
min      0.000000  220.000000    0.030000    1.00000
25%      0.000000  520.000000    3.010000    2.00000
50%      0.000000  580.000000    3.330000    2.00000
75%      1.000000  660.000000    3.590000    3.00000
max      1.000000  800.000000    3.990000    4.00000
0    273
1    127
Name: admit, dtype: int64

Process finished with exit code 0

2.2 Feature engineering

if __name__ == '__main__':
    # data_explore(admissions)
    features_train, targets_train, features_test, targets_test = data_transform(admissions)

2.3 Output of gre_bp_work()

if __name__ == '__main__':
    # data_explore(admissions)
    features_train, targets_train, features_test, targets_test = data_transform(admissions)
    gre_bp_work(features_train, targets_train, features_test, targets_test)
    # gre_bp_with_SGD(features_train, targets_train, features_test, targets_test)
D:\Anaconda\python.exe D:/AI20/HJZ/04-深度学习/1-深度学习入门/GRE反向传播demo/AI20/02_GRE的反向传播实现.py
Train loss:  0.2221308523508395
Train loss:  0.22187860894691622
Train loss:  0.2216397014123261
Train loss:  0.2214131069472161
Train loss:  0.22119788784897867
Train loss:  0.22099318409982732
Train loss:  0.22079820662114935
Train loss:  0.22061223113444273
Train loss:  0.22043459257375256
Train loss:  0.22026467999929836
Train loss:  0.2201019319664233
Train loss:  0.2199458323081054
Train loss:  0.2197959062930455
Train loss:  0.2196517171248279
Train loss:  0.2195128627508207
Train loss:  0.2193789729523853
Train loss:  0.2192497066906157
Train loss:  0.21912474968422974
Train loss:  0.21900381219842413
Train loss:  0.21888662702548853
Train loss:  0.2187729476397821
Train loss:  0.21866254651129108
Train loss:  0.21855521356348698
Train loss:  0.2184507547625194
Train loss:  0.21834899082600137
Train loss:  0.21824975604073807
Train loss:  0.21815289717973615
Train loss:  0.21805827250972815
Train loss:  0.21796575088126086
Train loss:  0.21787521089412137
Train loss:  0.2177865401315432
Train loss:  0.2176996344572352
Train loss:  0.21761439736980992
Train loss:  0.21753073940969284
Train loss:  0.21744857761402717
Train loss:  0.21736783501549373
Train loss:  0.21728844018133797
Train loss:  0.21721032678921823
Train loss:  0.2171334332367894
Train loss:  0.21705770228221447
Train loss:  0.2169830807130396
Train loss:  0.21690951904108882
Train loss:  0.21683697122124562
Train loss:  0.21676539439216252
Train loss:  0.2166947486371251
Train loss:  0.21662499676342378
Train loss:  0.21655610409875756
Train loss:  0.21648803830328972
Train loss:  0.21642076919611045
Train loss:  0.21635426859496248
Train loss:  0.2162885101681752
Train loss:  0.21622346929784592
Train loss:  0.2161591229533849
Train loss:  0.21609544957461485
Train loss:  0.2160324289636768
Train loss:  0.2159700421850628
Train loss:  0.21590827147314554
Train loss:  0.21584710014662536
Train loss:  0.21578651252936695
Train loss:  0.2157264938771364
Train loss:  0.21566703030978446
Train loss:  0.2156081087484702
Train loss:  0.21554971685753518
Train loss:  0.21549184299068302
Train loss:  0.21543447614113628
Train loss:  0.21537760589547436
Train loss:  0.2153212223908762
Train loss:  0.21526531627551404
Train loss:  0.21520987867186178
Train loss:  0.21515490114270544
Train loss:  0.21510037565964876
Train loss:  0.21504629457393792
Train loss:  0.2149926505894267
Train loss:  0.21493943673753002
Train loss:  0.21488664635401247
Train loss:  0.21483427305749053
Train loss:  0.2147823107295047
Train loss:  0.21473075349606205
Train loss:  0.21467959571053502
Train loss:  0.214628831937818
Train loss:  0.2145784569396494
Train loss:  0.21452846566101894
Train loss:  0.21447885321757237
Train loss:  0.2144296148839513
Train loss:  0.21438074608299265
Train loss:  0.21433224237572815
Train loss:  0.21428409945212618
Train loss:  0.21423631312252236
Train loss:  0.21418887930968392
Train loss:  0.2141417940414714
Train loss:  0.21409505344404403
Train loss:  0.2140486537355772
Train loss:  0.2140025912204488
Train loss:  0.21395686228386543
Train loss:  0.2139114633868931
Train loss:  0.2138663910618626
Train loss:  0.2138216419081261
Train loss:  0.21377721258813434
Train loss:  0.2137330998238135
Prediction accuracy: 0.750

Process finished with exit code 0

2.4 Output of gre_bp_with_SGD()

if __name__ == '__main__':
    # data_explore(admissions)
    features_train, targets_train, features_test, targets_test = data_transform(admissions)
    # gre_bp_work(features_train, targets_train, features_test, targets_test)
    gre_bp_with_SGD(features_train, targets_train, features_test, targets_test)
C:\Anaconda3\python.exe D:/AI/04-深度学习/1-深度学习入门/demo/03-GRE反向传播案例/02_GRE的反向传播实现.py
Epoch:20
Train loss:  0.2044030473320658
Epoch:40
Train loss:  0.20075444275448595
Epoch:60
Train loss:  0.2002023410198806
Epoch:80
Train loss:  0.19994601723907077
Epoch:100
Train loss:  0.19961184119508618
Epoch:120
Train loss:  0.19915540199087378
Epoch:140
Train loss:  0.1986347242825377
Epoch:160
Train loss:  0.1981111268070723
Epoch:180
Train loss:  0.1976090024125285
Epoch:200
Train loss:  0.19713190878869916
Epoch:220
Train loss:  0.19667673908454833
Epoch:240
Train loss:  0.19623545058629596
Epoch:260
Train loss:  0.19579120393719135
Epoch:280
Train loss:  0.1953163152073244
Epoch:300
Train loss:  0.19477940081389344
Epoch:320
Train loss:  0.19416136947532756
Epoch:340
Train loss:  0.1934628944297282
Epoch:360
Train loss:  0.19269607072350792
Epoch:380
Train loss:  0.1918807501159844
Epoch:400
Train loss:  0.19104748234874844
Epoch:420
Train loss:  0.19022912316952525
Epoch:440
Train loss:  0.1894489740855552
Epoch:460
Train loss:  0.1887198736052238
Epoch:480
Train loss:  0.188048210682915
Epoch:500
Train loss:  0.1874358811944024
Epoch:520
Train loss:  0.1868807965494762
Epoch:540
Train loss:  0.18637762993208126
Epoch:560
Train loss:  0.1859192011436504
Epoch:580
Train loss:  0.18549820255714897
Epoch:600
Train loss:  0.18510856212501506
Epoch:620
Train loss:  0.18474569595090218
Epoch:640
Train loss:  0.1844059246401011
Epoch:660
Train loss:  0.18408603857367492
Epoch:680
Train loss:  0.1837832279963255
Epoch:700
Train loss:  0.18349506877979718
Epoch:720
Train loss:  0.18321946642739068
Epoch:740
Train loss:  0.18295460306346747
Epoch:760
Train loss:  0.18269891443675637
Epoch:780
Train loss:  0.18245110187815408
Epoch:800
Train loss:  0.18221017762366243
Epoch:820
Train loss:  0.1819755321899482
Epoch:840
Train loss:  0.18174699461132163
Epoch:860
Train loss:  0.1815248405364749
Epoch:880
Train loss:  0.18130970820771905
Epoch:900
Train loss:  0.1811024167171446
Epoch:920
Train loss:  0.1809037258998389
Epoch:940
Train loss:  0.18071410170874502
Epoch:960
Train loss:  0.18053354412480624
Epoch:980
Train loss:  0.1803615093073338
Epoch:1000
Train loss:  0.18019692721681507
Epoch:1020
Train loss:  0.18003828530102106
Epoch:1040
Train loss:  0.17988372272113717
Epoch:1060
Train loss:  0.17973106816261544
Epoch:1080
Train loss:  0.17957777191663785
Epoch:1100
Train loss:  0.17942074349822848
Epoch:1120
Train loss:  0.17925620965418046
Epoch:1140
Train loss:  0.17907980666131396
Epoch:1160
Train loss:  0.17888708582546492
Epoch:1180
Train loss:  0.17867433117175618
Epoch:1200
Train loss:  0.17843925233618305
Epoch:1220
Train loss:  0.17818113770196406
Epoch:1240
Train loss:  0.17790043085633236
Epoch:1260
Train loss:  0.17759798943536806
Epoch:1280
Train loss:  0.17727434168718875
Epoch:1300
Train loss:  0.17692926837518835
Epoch:1320
Train loss:  0.1765621194138257
Epoch:1340
Train loss:  0.17617324048532776
Epoch:1360
Train loss:  0.1757659932975615
Epoch:1380
Train loss:  0.17534693820568265
Epoch:1400
Train loss:  0.17492280214384218
Epoch:1420
Train loss:  0.17449780283903343
Epoch:1440
Train loss:  0.17407413011812445
Epoch:1460
Train loss:  0.17365354995557325
Epoch:1480
Train loss:  0.1732381440129196
Epoch:1500
Train loss:  0.17283025304778935
Epoch:1520
Train loss:  0.17243219305034774
Epoch:1540
Train loss:  0.17204602193298876
Epoch:1560
Train loss:  0.17167340867671751
Epoch:1580
Train loss:  0.17131558351823994
Epoch:1600
Train loss:  0.1709733400034274
Epoch:1620
Train loss:  0.17064706750680042
Epoch:1640
Train loss:  0.17033680067229437
Epoch:1660
Train loss:  0.17004227754788698
Epoch:1680
Train loss:  0.16976300145365836
Epoch:1700
Train loss:  0.16949830361168658
Epoch:1720
Train loss:  0.1692474047721157
Epoch:1740
Train loss:  0.16900947467770533
Epoch:1760
Train loss:  0.16878368815275607
Epoch:1780
Train loss:  0.16856927570018032
Epoch:1800
Train loss:  0.168365564749275
Epoch:1820
Train loss:  0.1681720057868597
Epoch:1840
Train loss:  0.16798817717563097
Epoch:1860
Train loss:  0.16781376553380797
Epoch:1880
Train loss:  0.16764852554037712
Epoch:1900
Train loss:  0.1674922308932894
Epoch:1920
Train loss:  0.1673446315776486
Epoch:1940
Train loss:  0.16720542865914287
Epoch:1960
Train loss:  0.1670742691366462
Epoch:1980
Train loss:  0.1669507557246184
Prediction accuracy: 0.700

Process finished with exit code 0
