"""Implement a neural network from scratch with NumPy.

0. Initialize parameters
1. Forward propagation
2. Compute the loss
3. Backward propagation
4. Update parameters
5. Train the model
6. Validate the model
"""

import numpy as np
# Network architecture: each dict configures one fully connected layer
# (input width, output width, and the activation applied to its output).
nn_struct = [
{"input_dim": 2, "output_dim": 25, "activation": "relu"}, # input X has dimension 2
{"input_dim": 25, "output_dim": 50, "activation": "relu"},
{"input_dim": 50, "output_dim": 50, "activation": "relu"},
{"input_dim": 50, "output_dim": 25, "activation": "relu"},
{"input_dim": 25, "output_dim": 1, "activation": "sigmoid"} ## sigmoid outputs a probability
]
# Activation functions and their gradients
def relu(z):
    """ReLU activation: elementwise maximum of z and 0."""
    return np.maximum(z, 0)
def sigmoid(z):
    """Numerically stable logistic sigmoid, 1 / (1 + exp(-z)).

    The naive form np.exp(-z) overflows (inf + RuntimeWarning) for large
    negative z. Here np.exp is only ever evaluated on non-positive values,
    which can underflow to 0.0 but never overflow, using the identity
    sigmoid(z) = exp(z) / (1 + exp(z)) for z < 0.

    Parameters: z — scalar or ndarray of logits.
    Returns: ndarray of the same shape with values in (0, 1).
    """
    z = np.asarray(z, dtype=float)
    # exp argument is always <= 0, so overflow is impossible
    e = np.exp(-np.abs(z))
    return np.where(z >= 0, 1.0 / (1.0 + e), e / (1.0 + e))
def relu_backward(dA, Z):
    """Backprop through ReLU: pass the upstream gradient where Z > 0, zero elsewhere.

    Returns a new array; dA is left untouched.
    """
    return np.where(Z > 0, dA, 0.0)
def sigmoid_backward(dA, Z):
    """Backprop through sigmoid: dZ = dA * s * (1 - s), where s = sigmoid(Z)."""
    s = sigmoid(Z)
    one_minus_s = 1 - s
    return dA * s * one_minus_s
## 0. Initialize parameters: the weight matrix w and bias b for each layer
def init_layers(seed=99):
np.random.seed(seed)
params = {}
for idx, layer in enumerate(nn_struct):
layer_idx = idx + 1
input_size = layer["input_dim"]
output_size = layer["output_dim"]