numpy实现BP神经网络,预测逻辑或运算
模型说明:
- 单隐层神经网络,隐层神经元个数可调整
- 激活函数:sigmoid
- 模型参数更新公式的推导可参考西瓜书(周志华《机器学习》)第5章
- 模型权重初始化:按正态分布随机采样
- 偏置即西瓜书中的"阈值",随训练一同更新
import numpy as np
import matplotlib.pyplot as plt
def sigmoid(x):
    """Logistic (sigmoid) activation: 1 / (1 + e^{-x}), mapping reals into (0, 1)."""
    neg_exp = np.exp(-x)
    return 1.0 / (1.0 + neg_exp)
def BPNet(dataSet,iterate=300, learning_rate = 2,hiden_layer = 3):
    """Train a single-hidden-layer BP network with per-sample (stochastic) updates.

    Args:
        dataSet: list of [input_list, target_list] pairs, e.g. [[[1, 0], [1]], ...].
        iterate: number of single-sample update steps.
        learning_rate: step size eta for all parameter updates.
        hiden_layer: number of hidden-layer neurons.

    Returns:
        List of per-step squared errors (also plotted via matplotlib).
    """
    input_layer = len(dataSet[0][0])
    out_layer = len(dataSet[0][1])
    # He-style scaled normal initialization of the weight matrices.
    hiden_Weight = np.random.randn(hiden_layer,input_layer)/np.sqrt(hiden_layer/2.0)
    out_Weight = np.random.randn(out_layer, hiden_layer)/np.sqrt(out_layer/2.0)
    input_bias = np.random.randn(hiden_layer)  # hidden-layer thresholds (gamma in the watermelon book)
    hide_bias = np.random.randn(out_layer)     # output-layer thresholds (theta)
    errors= []
    for n in range(iterate):
        # Pick one random training sample (stochastic gradient descent).
        index = np.random.randint(0,len(dataSet))
        xinput = np.array(dataSet[index][0])
        yout = np.array(dataSet[index][1])
        # Forward pass. BUG FIX: the thresholds were updated below but never
        # used here; per the book, b_h = f(alpha_h - gamma_h), y_j = f(beta_j - theta_j).
        hiden = sigmoid(np.dot(xinput,hiden_Weight.T) - input_bias)
        out = sigmoid(np.dot(hiden,out_Weight.T) - hide_bias)
        # Squared error of this sample's prediction.
        error = np.sum((out-yout)**2)
        # Backward pass: gradients per watermelon book eqs. 5.10 and 5.15.
        g_j = out*(1-out)*(yout-out)
        e_h = hiden*(1-hiden)*(g_j.dot(out_Weight))
        hiden_Weight +=learning_rate*e_h.reshape(-1,1)*xinput
        out_Weight += learning_rate*g_j.reshape(-1,1)*hiden
        # Threshold updates carry the opposite sign (delta_theta = -eta * g).
        input_bias += -learning_rate*e_h
        hide_bias += -learning_rate*g_j
        errors.append(error)
    plt.plot(errors)
    plt.show()
    return errors
# Fix the RNG seed so every run is reproducible.
np.random.seed(0)
# Training samples for logical OR: each entry is [[x1, x2], [x1 OR x2]].
dataSet = [
    [[1, 0], [1]],
    [[0, 1], [1]],
    [[0, 0], [0]],
    [[1, 1], [1]],
]
BPNet(dataSet, iterate=300, learning_rate=5, hiden_layer=3)
运行结果如下图所示(误差随迭代次数变化的曲线):