import numpy
def sigmoid(Z):
    """Element-wise logistic activation: 1 / (1 + e^-Z)."""
    return 1.0 / (1.0 + numpy.exp(-Z))
def sigmoid_backward(A):
    """Sigmoid derivative expressed through its own output: A * (1 - A).

    NOTE(review): not called anywhere in the visible script — the backward
    passes below inline this formula instead.
    """
    return A * (1.0 - A)
def forward(W, A, b):
    """Forward pass through one dense layer: sigmoid(W @ A + b).

    W is the weight matrix, A the incoming activation column vector,
    b the bias column vector; returns the layer's activation.
    """
    pre_activation = numpy.dot(W, A) + b
    return sigmoid(pre_activation)
def cost_compute(output_, Y_):
    """Per-output squared-error cost: (output - target)^2 / 2.

    Returns the element-wise cost (not summed) and prints it — the caller
    invokes this once per training step, so this logs every iteration.
    """
    error = output_ - Y_
    cost_ = numpy.square(error) / 2
    print(f'cost = {cost_}')
    return cost_
def backward_out(W, A, output_, Y_, learningRate_):
    """Gradient-descent step for the output layer (squared error, sigmoid).

    Returns the updated weight matrix (modified in place) and the
    output-layer delta, which the hidden-layer pass reuses.
    """
    # delta = dCost/dZ = (output - target) * sigmoid'(Z); the derivative is
    # written through the activation itself as output * (1 - output).
    delta = (output_ - Y_) * output_ * (1 - output_)
    grad_w = numpy.dot(delta, A.T)
    W -= learningRate_ * grad_w  # in-place, same as the original update
    return W, delta
def backward_hide(W_prev, W_current, dZ_, A_current, X, learningRate_):
    """Gradient-descent step for the hidden layer.

    Parameters
    ----------
    W_prev : weights of the *downstream* (output) layer, used to propagate
        its delta back to this layer.
    W_current : this hidden layer's weights; updated in place.
    dZ_ : delta of the downstream layer, column vector.
    A_current : this layer's sigmoid activations, column vector.
    X : this layer's input column vector.
    learningRate_ : gradient-descent step size.

    Returns the updated W_current and this layer's delta.
    """
    # BUG FIX: backpropagation requires the TRANSPOSE of the downstream
    # weight matrix (delta_hidden = W_next.T @ delta_next * sigmoid');
    # the original used W_prev directly, which only ran without error
    # because the matrices here happen to be square.
    dZ = numpy.dot(W_prev.T, dZ_) * A_current * (1 - A_current)
    dw = numpy.dot(dZ, X.T)
    W_current -= learningRate_ * dw  # in-place update
    return W_current, dZ
# Training data and hyper-parameters (the classic two-layer worked example:
# inputs 0.05/0.1, targets 0.01/0.99, learning rate 0.5).
Y = numpy.array([[0.01], [0.99]])   # target outputs, column vector
X = numpy.array([[0.05], [0.1]])    # network inputs, column vector
learningRate = 0.5

# Initial weights for the hidden (W1) and output (W2) layers, and the
# biases shared within each layer. NOTE(review): the biases are never
# updated by the training loop below — presumably intentional for this
# example, but confirm.
W1 = numpy.array([[0.15, 0.2], [0.25, 0.3]])
W2 = numpy.array([[0.4, 0.45], [0.5, 0.55]])
b1 = numpy.array([[0.35], [0.35]])
b2 = numpy.array([[0.6], [0.6]])

for i in range(100000):
    A1 = forward(W1, X, b1)          # hidden-layer activations
    output = forward(W2, A1, b2)     # network output
    cost = cost_compute(output, Y)   # logs the per-output cost each step
    # BUG FIX: the hidden-layer gradient must be propagated through the
    # PRE-update output weights; snapshot W2 before backward_out mutates it.
    W2_old = W2.copy()
    W2, dZ = backward_out(W2, A1, output, Y, learningRate)
    W1, dZ = backward_hide(W2_old, W1, dZ, A1, X, learningRate)

print("最终输出:", output)
print("目标输出:", Y)
print("偏差值为:", Y - output)
# 训练结果 (training results):
