Machine Learning, Supervised Learning (II): Neural Networks

Neural Networks

First, the code: a three-layer fully connected network (64-100-10) trained one sample at a time with backpropagation on sklearn's handwritten-digits dataset.

#! /usr/bin/env python
# coding=utf-8
from sklearn.datasets import load_digits  # the dataset
from sklearn.preprocessing import LabelBinarizer  # label binarization (one-hot encoding)
# from sklearn.cross_validation import train_test_split  # dataset splitting; in newer sklearn versions use model_selection as below
from sklearn.model_selection import train_test_split
import numpy as np

import pylab as pl  # visualization (unused below)


def sigmoid(x):  # activation function
    return 1 / (1 + np.exp(-x))


def dsigmoid(x):  # derivative of sigmoid, expressed in terms of its output: x here is sigmoid(z)
    return x * (1 - x)


class NeuralNetwork:
    def __init__(self, layers):  # a three-layer network; the list [64, 100, 10] gives the sizes of the input, hidden and output layers
        # initialize the weights in the range -1 to 1
        self.V = np.random.random((layers[0] + 1, layers[1])) * 2 - 1  # input-to-hidden weights (65, 100); 65 rows because of the bias W0
        self.W = np.random.random((layers[1], layers[2])) * 2 - 1  # hidden-to-output weights (100, 10)

    def train(self, X, y, lr=0.1, epochs=10000):
        # lr is the learning rate, epochs the number of training iterations
        # append a bias column to the dataset
        temp = np.ones([X.shape[0], X.shape[1] + 1])
        temp[:, 0:-1] = X
        X = temp  # the last column is the bias input (constant 1)

        # update the weights
        for n in range(epochs + 1):
            i = np.random.randint(X.shape[0])  # pick one random row (one sample) per update
            x = X[i]
            x = np.atleast_2d(x)  # promote to a 2-D row vector

            L1 = sigmoid(np.dot(x, self.V))  # hidden-layer output (1, 100)
            L2 = sigmoid(np.dot(L1, self.W))  # output-layer output (1, 10)

            # error terms (deltas)
            L2_delta = (y[i] - L2) * dsigmoid(L2)  # (1, 10)
            L1_delta = L2_delta.dot(self.W.T) * dsigmoid(L1)  # (1, 100); element-wise multiplication

            # weight updates
            self.W += lr * L1.T.dot(L2_delta)  # (100, 10)
            self.V += lr * x.T.dot(L1_delta)  # (65, 100)

            # report test accuracy every 1000 iterations
            if n % 1000 == 0:
                predictions = []
                for j in range(X_test.shape[0]):
                    out = self.predict(X_test[j])  # evaluate on the held-out test set (module-level X_test)
                    predictions.append(np.argmax(out))  # index of the largest output is the predicted class
                accuracy = np.mean(np.equal(predictions, y_test))  # fraction of correct predictions
                print('epoch:', n, 'accuracy:', accuracy)

    def predict(self, x):
        # append the bias term; x is one-dimensional here
        temp = np.ones(x.shape[0] + 1)
        temp[0:-1] = x
        x = temp
        x = np.atleast_2d(x)

        L1 = sigmoid(np.dot(x, self.V))  # hidden-layer output
        L2 = sigmoid(np.dot(L1, self.W))  # output-layer output
        return L2


digits = load_digits()  # load the data
X = digits.data  # features

y = digits.target  # labels
# print(y[0:10])

# normalize the data to [0, 1]; the general form is x = (x - x.min) / (x.max - x.min)
X -= X.min()
X /= X.max()

# create the neural network
nm = NeuralNetwork([64, 100, 10])

X_train, X_test, y_train, y_test = train_test_split(X, y)  # default split is 3:1 (75% train, 25% test)

# binarize the labels (one-hot encoding)
labels_train = LabelBinarizer().fit_transform(y_train)
# print(labels_train[0:10])
labels_test = LabelBinarizer().fit_transform(y_test)

print('start')

nm.train(X_train, labels_train, epochs=20000)

print('end')
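
The updates in train amount to one step of stochastic gradient descent on the squared error per iteration; written out in the code's notation (a sketch, with $\eta$ the learning rate lr and $\odot$ element-wise multiplication):

$$
\begin{aligned}
\delta_2 &= (y - L_2) \odot L_2(1 - L_2), \qquad
\delta_1 = (\delta_2 W^{\top}) \odot L_1(1 - L_1), \\
W &\leftarrow W + \eta\, L_1^{\top} \delta_2, \qquad
V \leftarrow V + \eta\, x^{\top} \delta_1.
\end{aligned}
$$

As a quick sanity check after training, you can classify a single test image (a minimal sketch reusing the variables from the script above; the index 0 is arbitrary):

out = nm.predict(X_test[0])          # (1, 10) vector of output activations
print('predicted:', np.argmax(out))  # the largest activation marks the predicted digit
print('actual:', y_test[0])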

Now the results:

epoch: 0 accuracy: 0.10444444444444445
epoch: 1000 accuracy: 0.58
epoch: 2000 accuracy: 0.8511111111111112
epoch: 3000 accuracy: 0.9066666666666666
epoch: 4000 accuracy: 0.9288888888888889
epoch: 5000 accuracy: 0.9177777777777778
epoch: 6000 accuracy: 0.9444444444444444
epoch: 7000 accuracy: 0.9422222222222222
epoch: 8000 accuracy: 0.9444444444444444
epoch: 9000 accuracy: 0.9488888888888889
epoch: 10000 accuracy: 0.9511111111111111
epoch: 11000 accuracy: 0.9555555555555556
epoch: 12000 accuracy: 0.9533333333333334
epoch: 13000 accuracy: 0.9511111111111111
epoch: 14000 accuracy: 0.9533333333333334
epoch: 15000 accuracy: 0.96
epoch: 16000 accuracy: 0.9533333333333334
epoch: 17000 accuracy: 0.96
epoch: 18000 accuracy: 0.96
epoch: 19000 accuracy: 0.96
epoch: 20000 accuracy: 0.96
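
For comparison, scikit-learn ships a ready-made multilayer perceptron. Below is a minimal sketch, not a tuned baseline: the hidden-layer size matches the hand-rolled network above, while max_iter and random_state are illustrative choices.

from sklearn.neural_network import MLPClassifier

clf = MLPClassifier(hidden_layer_sizes=(100,), max_iter=500, random_state=0)
clf.fit(X_train, y_train)  # takes the integer labels directly; no binarization needed
print('MLPClassifier accuracy:', clf.score(X_test, y_test))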

See also: https://blog.youkuaiyun.com/huakai16/article/details/77479127
