使用了周志华老师写的《机器学习》中的神经网络的公式,具体公式可以看《机器学习》--周志华第103页,用的是标准BP算法
具体代码在下面,注释明确
using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
using System.Threading.Tasks;
using System.Windows.Forms;
namespace mssbyi
{
//Random Random = new Random();
// Standard back-propagation (BP) neural network, following the formulas in
// Zhou Zhihua's "Machine Learning", p.103 (standard BP algorithm).
// NOTE(review): only part of the class is visible here; fields such as
// layers/netSize/numLaters/error are declared elsewhere in the file.
class BpDeep
{
// Pseudo-random source, presumably used for weight/threshold initialization
// elsewhere in the class — TODO confirm (initialization code not visible here).
Random Random = new Random();
// Constructs the BP network.
// net_size: neuron count per layer — input layer first, output layer last (requires >= 2 entries).
// n: learning rate; MST: maximum number of training iterations; min_error: convergence threshold.
// Throws ArgumentNullException when net_size is null, ArgumentException when it has fewer than 2 layers.
public BpDeep(List<int> net_size, double n, int MST, double min_error)
{
    // Guard clauses: the original code dereferenced net_size[0]/net_size[1] directly,
    // crashing with an unhelpful exception on null or single-layer input.
    if (net_size == null)
        throw new ArgumentNullException(nameof(net_size));
    if (net_size.Count < 2)
        throw new ArgumentException("net_size must contain at least an input and an output layer.", nameof(net_size));

    // Store model hyper-parameters.
    inputSize = net_size[0];
    numLaters = net_size.Count;
    netSize = net_size;
    outputSize = net_size.Last();
    N = n;
    MaxStudyTimes = MST;

    // Input layer: netSize[0] neurons feeding netSize[1] neurons of the next layer.
    layers.Add(new layer(netSize[0], netSize[1], n));
    // Hidden layers: each connects forward to the following layer.
    for (int i = 1; i < numLaters - 1; i++)
    {
        layers.Add(new layer(netSize[i], netSize[i + 1], n));
    }
    // Output layer: no outgoing connections (next-layer size 0).
    // NOTE(review): original comment says the class count still needs to be set later — verify.
    layers.Add(new layer(netSize.Last(), 0, n));

    error = 100; // sentinel: any value larger than minError works as the initial error
    minError = min_error;
}
// Forward propagation: feed the sample into the input layer,
// then let each layer compute its outputs from the previous one.
public void forward(List<double> input)
{
    layers[0].Y = input;
    int current = 1;
    while (current < numLaters)
    {
        layers[current].forward(layers[current - 1]);
        current++;
    }
}
//反向传播
public void backward(List<double> output_real)
{
//计算误差
//计算误差和输出层神经元梯度项
List<double> g = new List<double>();
for (int i = 0; i < outputSize; i++)
{
g.Add((layers.Last().Y[i] * (1 - layers.Last().Y[i]) * (output_real[i] - layers.Last().Y[i])));
error += 0.5 * Math.Pow((output_real[i] - layers.Last().Y[i]), 2);
}
//设置输出层的阈值
List<double> db_last = new List<double>();
for (int i = 0; i < layers[numLaters - 1].numNeuron; i++)
{
db_last.Add(-1 * N * g[i]);
//db_last[i] = -1 * N * g[i];
}
layers[numLaters - 1].setDB = db_last;
//计算最后一个隐藏层的神经元梯度项
List<double> e = new List<double>();
for (int i = 0; i < layers[numLaters - 2].numNeuron; i++)
{
double sum = 0;
for (int j = 0; j < outputSize; j++)
{
sum += layers[numLaters - 2].getW[i][j] * g[j];
}
e.Add(layers[numLaters - 2].Y[i] * (1 - layers[numLaters