EP2. Implementing a Shallow Neural Network with NumPy

import numpy as np

def sigmoid(x):
    # Logistic sigmoid, used as the output-layer activation.
    return 1 / (1 + np.exp(-x))
def train(train_x, train_y):
    hidden_units = 4
    learning_rate = 0.01
    iteration_nums = 10000
    m = train_x.shape[1]            # number of training examples (columns)
    input_dim = train_x.shape[0]    # input features per example
    output_dim = train_y.shape[0]   # output units
    # Small random weights break the symmetry between hidden units
    # (all-zero weights would make every hidden unit compute the same thing);
    # the biases can safely start at zero.
    w1 = np.random.randn(hidden_units, input_dim) * 0.01
    b1 = np.zeros(shape=(hidden_units, 1))
    w2 = np.random.randn(output_dim, hidden_units) * 0.01
    b2 = np.zeros(shape=(output_dim, 1))
    for i in range(iteration_nums):
        # Forward propagation (cross-entropy loss)
        z1 = np.dot(w1, train_x) + b1
        a1 = np.tanh(z1)
        z2 = np.dot(w2, a1) + b2
        a2 = sigmoid(z2)
        cost = -np.sum(train_y * np.log(a2) + (1 - train_y) * np.log(1 - a2)) / m
        if i % 1000 == 0:
            print("cost after iteration %i: %f" % (i, cost))
        # Backward propagation: gradients of the loss w.r.t. each parameter
        dz2 = a2 - train_y
        dw2 = np.dot(dz2, a1.T) / m
        db2 = np.sum(dz2, axis=1, keepdims=True) / m
        dz1 = np.dot(w2.T, dz2) * (1 - np.power(a1, 2))   # tanh'(z1) = 1 - a1^2
        dw1 = np.dot(dz1, train_x.T) / m
        db1 = np.sum(dz1, axis=1, keepdims=True) / m
        # Gradient-descent parameter update
        w1 = w1 - learning_rate * dw1
        b1 = b1 - learning_rate * db1
        w2 = w2 - learning_rate * dw2
        b2 = b2 - learning_rate * db2
    return w1, b1, w2, b2
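For reference, these are the gradient formulas the update loop implements. This is the standard derivation for a sigmoid output with cross-entropy loss and a tanh hidden layer, sketched here for convenience:

$$
\begin{aligned}
J &= -\frac{1}{m}\sum_{i=1}^{m}\left[y^{(i)}\log a_2^{(i)} + \left(1-y^{(i)}\right)\log\left(1-a_2^{(i)}\right)\right]\\
dz_2 &= a_2 - y,\quad dw_2 = \tfrac{1}{m}\,dz_2\,a_1^{\top},\quad db_2 = \tfrac{1}{m}\textstyle\sum dz_2\\
dz_1 &= \left(w_2^{\top}dz_2\right)\odot\left(1-a_1^{2}\right),\quad dw_1 = \tfrac{1}{m}\,dz_1\,x^{\top},\quad db_1 = \tfrac{1}{m}\textstyle\sum dz_1
\end{aligned}
$$

The convenient cancellation in $dz_2 = a_2 - y$ is exactly why sigmoid outputs pair with cross-entropy loss: the sigmoid derivative cancels against the loss derivative.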
def predict(test_x, w1, b1, w2, b2):
    # Forward pass with the trained parameters.
    z1 = np.dot(w1, test_x) + b1
    a1 = np.tanh(z1)
    z2 = np.dot(w2, a1) + b2
    a2 = sigmoid(z2)
    # Threshold the sigmoid probabilities at 0.5 to get 0/1 labels.
    return (a2 > 0.5).astype(int)
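A minimal usage sketch, assuming a synthetic dataset: the toy train_x and train_y below are hypothetical, since the post does not show how the data is loaded.

# Hypothetical toy data: 2 features x 200 examples; the label is 1 when the
# two features have the same sign (an XOR-like problem a linear model cannot fit).
import numpy as np

np.random.seed(0)
train_x = np.random.randn(2, 200)                               # shape (features, examples)
train_y = (train_x[0:1, :] * train_x[1:2, :] > 0).astype(int)   # shape (1, 200)

w1, b1, w2, b2 = train(train_x, train_y)
preds = predict(train_x, w1, b1, w2, b2)
print("training accuracy:", np.mean(preds == train_y))

Note the column-per-example layout: every matrix shape in train() assumes examples are stacked along axis 1, which is why the bias vectors broadcast correctly across the batch.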