// Neural network in C++ (reposted)
// Source: http://blog.youkuaiyun.com/calcular/article/details/50392502

#ifndef DATASER_H
#define DATASER_H

#include<stdio.h>
#include<stdlib.h>
#include<math.h>
#include<time.h>
#include<iostream>
#include<algorithm>
#include<queue>
using namespace std;

#define NETL 5	//输入维度
#define NETX 10	//每层多少个单元,大于输入维度

/* Generate a standard-normal sample (Marsaglia polar variant of Box-Muller).
   Each accepted point yields two samples; the second is cached in static
   state and returned by the next call. Uses rand(), so seed with srand()
   for reproducibility. NOTE(review): static state makes this non-thread-safe. */
double gaussrand()
{
	static double v1, v2, s;	// last accepted point and its squared radius
	static int phase = 0;		// 0: draw a fresh point, 1: return the cached twin
	double sample;
	if (phase == 0) {
		for (;;) {
			double u1 = (double)rand() / RAND_MAX;
			double u2 = (double)rand() / RAND_MAX;
			v1 = 2 * u1 - 1;
			v2 = 2 * u2 - 1;
			s = v1 * v1 + v2 * v2;
			if (s < 1 && s != 0) break;	// accept only points strictly inside the unit circle
		}
		sample = v1 * sqrt(-2 * log(s) / s);
	} else {
		sample = v2 * sqrt(-2 * log(s) / s);
	}
	phase = 1 - phase;
	return sample;
}

/* Find the element with the smallest absolute value.
   in:   input array
   dim:  number of elements
   minm: receives the smallest |value| (left untouched when dim <= 0)
   Returns the index of that element, or -1 when dim <= 0.
   Fix: the search was seeded with the magic sentinel 99999.9, so arrays
   whose values all have magnitude >= 99999.9 silently returned -1; the
   seed is now HUGE_VAL, which works for any finite input. */
int fminm(double *in, int dim, double &minm)
{
	int c = -1;
	double minx = HUGE_VAL;	// was 99999.9: failed silently for large-magnitude data
	for (int i = 0; i < dim; i++)
	{
		if (fabs(in[i]) < minx)
		{
			minx = fabs(in[i]);
			c = i;
		}
	}
	if (c >= 0)
		minm = minx;	// only report a minimum when one was actually found
	return c;
}

/* Find the smallest absolute value that differs from minm (i.e. the
   "second smallest" after fminm has reported minm).
   in:   input array (all three parameters are inputs)
   dim:  number of elements
   minm: the |value| to exclude, as returned by fminm
   Returns the index of the winner, or -1 when no qualifying element exists.
   Fix: replaced the 99999.9 sentinel with HUGE_VAL so values with
   magnitude >= 99999.9 are no longer silently ignored. */
int fmin_d2(double *in, int dim, const double minm)
{
	int c = -1;
	double minx = HUGE_VAL;	// was 99999.9: failed silently for large-magnitude data
	for (int i = 0; i < dim; i++)
	{
		const double v = fabs(in[i]);
		// note: an element equal to minm neither wins nor lowers the bound,
		// matching the original tie-skipping behavior
		if (v < minx && v != minm)
		{
			minx = v;
			c = i;
		}
	}
	return c;
}

/* Print one row of doubles, each left-padded to 10 columns, then a newline. */
void disp(const double *data, const int dim)
{
	int k = 0;
	while (k < dim)
	{
		printf("%-10f ", data[k]);
		++k;
	}
	printf("\n");
}

/*
	Computing unit: reduce a dim-dimensional input to one scalar.
	Weighted sum of the inputs passed through the logistic sigmoid.
	in:  input vector
	w:   weight vector
	dim: dimension
	Returns a value in (0, 1).
*/
double calo(const double *in, const double *w, const int dim)
{
	double acc = 0;
	for (int k = 0; k < dim; k++)
		acc += in[k] * w[k];
	return 1.0 / (1.0 + exp(-1.0 * acc));	// logistic squashing
}

/* Reduce the input and report the signed error against an expectation:
   positive when the unit's output lies above the expected value,
   negative when below. */
double acalo(const double *in, const double *w, const int dim, const double expect)
{
	const double out = calo(in, w, dim);	// sigmoid-squashed weighted sum
	return out - expect;
}

/*
	One weight-adjustment step for a single unit (delta rule).
	Usage example: error[i] = tra(in, w[i], N, ex, offset[i], e[i]);
	in:     input vector
	w:      weight vector, updated in place
	dim:    dimension
	expect: target output
	offset: receives the adjustment subtracted from each weight
	e:      receives the change in |error| (negative means improvement)
	Returns the signed error measured after the update.
*/
double tra(
	const double *in,
	double *w,	//会得到更改的权重系数
	const int dim,
	const double expect,
	double *offset,	//权重调整值记录数组
	double &e	//得到误差改进值
	)
{
	const double v = 0.8;	// learning rate (tuning log: 34,16,9,8,8,9,15,59,x,x)
	const double r = calo(in, w, dim);
	const double err = r - expect;	// signed error before the update
	// offsets depend only on the pre-update output, so computing and
	// applying them in a single pass is equivalent to the two-pass form
	for (int k = 0; k < dim; k++)
	{
		offset[k] = v*err*r*(1.0 - r)*in[k];	// rate * error * sigmoid' * input
		w[k] -= offset[k];
	}
	const double re = acalo(in, w, dim, expect);	// re-evaluate with new weights
	e = fabs(re) - fabs(err);
	return re;
}

/*
	Forward pass of the fixed three-hidden-layer network.
	in: NETL-dimensional input
	w1: input-layer weights      (NETX units x NETL inputs)
	w2: hidden-layer-2 weights   (NETX x NETX)
	w3: hidden-layer-3 weights   (NETX x NETX)
	w4: output-layer weights     (NETX)
	Returns the scalar network output in (0, 1).
*/
double calo2(
	const double in[NETL],
	const double w1[NETX][NETL],
	const double w2[NETX][NETX],
	const double w3[NETX][NETX],
	const double w4[NETX]
	)
{
	double buf[NETX][2];	// ping-pong activation buffer reused across layers
	int u, k;
	double acc;
	/* layer 1: NETL inputs -> NETX activations */
	for (u = 0; u < NETX; u++) {
		acc = 0;
		for (k = 0; k < NETL; k++)
			acc += w1[u][k] * in[k];
		buf[u][0] = 1.0 / (1.0 + exp(-1.0 * acc));
	}
	/* layer 2 */
	for (u = 0; u < NETX; u++) {
		acc = 0;
		for (k = 0; k < NETX; k++)
			acc += w2[u][k] * buf[k][0];
		buf[u][1] = 1.0 / (1.0 + exp(-1.0 * acc));
	}
	/* layer 3: overwrites column 0, which is no longer needed */
	for (u = 0; u < NETX; u++) {
		acc = 0;
		for (k = 0; k < NETX; k++)
			acc += w3[u][k] * buf[k][1];
		buf[u][0] = 1.0 / (1.0 + exp(-1.0 * acc));
	}
	/* single output unit */
	acc = 0;
	for (u = 0; u < NETX; u++)
		acc += buf[u][0] * w4[u];
	return 1.0 / (1.0 + exp(-1.0 * acc));
}

/* One backpropagation training step for the full network. */
double tra2(	// trains the network: weights are modified in place; returns the post-update error
	const double in[NETL],	// input vector
	double w1[NETX][NETL],	// layer-1 weights (input layer)
	double w2[NETX][NETX],	// layer-2 weights (hidden)
	double w3[NETX][NETX],	// layer-3 weights (hidden)
	double w4[NETX],	// output-layer weights
	const double expect	// target output
	)
{
	int i, j;
	double sum,res,err;
	double out[NETX][3];// activation buffer: one column per hidden layer
	for (i = 0;i < NETX;i++) {
		sum = 0;	// layer-1 activations
		for (j = 0;j < NETL;j++)  sum += w1[i][j] * in[j];
		out[i][0] = 1.0 / (1.0 + exp(-1.0*sum));
	}
	for (i = 0;i < NETX;i++) {	// layer-2 activations
		sum = 0;
		for (j = 0;j < NETX;j++)  sum += w2[i][j] * out[j][0];
		out[i][1] = 1.0 / (1.0 + exp(-1.0*sum));
	}
	for (i = 0;i < NETX;i++) {	// layer-3 activations
		sum = 0;
		for (j = 0;j < NETX;j++)  sum += w3[i][j] * out[j][1];
		out[i][2] = 1.0 / (1.0 + exp(-1.0*sum));
	}
	sum = 0;
	for (i = 0;i < NETX;i++)  sum += out[i][2] * w4[i];
	res=(1.0 / (1.0 + exp(-1.0*sum)));
	err = res - expect;	// signed output error
	/* backpropagate the error (gradient descent) */
	double lost[NETX][3];	// error term reaching each unit, one column per layer
	for (i = 0;i < NETX;i++)	// error reaching layer 3: output delta spread through w4
	{
		lost[i][2] = err*res*(1.0 - res)*w4[i];
	}
	for (i = 0;i < NETX;i++)	// error reaching layer 2: layer-3 deltas spread through w3
	{
		sum = 0;
		for (j = 0;j < NETX;j++)
		{
			sum += lost[j][2] * out[j][2] * (1.0 - out[j][2])*w3[j][i];
		}
		lost[i][1] = sum;
	}
	for (i = 0;i < NETX;i++)	// error reaching layer 1: layer-2 deltas spread through w2
	{
		sum = 0;
		for (j = 0;j < NETX;j++)
		{
			sum += lost[j][1] * out[j][1] * (1.0 - out[j][1])*w2[j][i];
		}
		lost[i][0] = sum;
	}
	/* update the weights: w -= rate * delta * upstream activation.
	   out[][] still holds pre-update activations, so update order is safe */
	double v;	// learning rate
	v = 0.1;	
	for (i = 0;i < NETX;i++)	// layer-1 weights
	{
		for (j = 0;j < NETL;j++)
		{
			w1[i][j] -= v*lost[i][0] * out[i][0] * (1.0 - out[i][0])*in[j];
		}
	}
	for (i = 0;i < NETX;i++)	// layer-2 weights
	{
		for (j = 0;j < NETX;j++)
		{
			w2[i][j] -= v*lost[i][1] * out[i][1] * (1.0 - out[i][1])*out[j][0];
		}
	}
	for (i = 0;i < NETX;i++)	// layer-3 weights
	{
		for (j = 0;j < NETX;j++)
		{
			w3[i][j] -= v*lost[i][2] * out[i][2] * (1.0 - out[i][2])*out[j][1];
		}
	}
	for (i = 0;i < NETX;i++)	// output-layer weights
	{
		w4[i] -= v*err*res*(1.0 - res)*out[i][2];
	}
	double cals;
	cals=calo2(in, w1, w2, w3, w4);	// forward pass with the updated weights
	return cals - expect;	// residual signed error after this step
}

/* Network parameter bundle: all weight matrices of the three-hidden-layer
   network used by calo2/tra2. Plain-old-data, so nsave/nload can persist
   it with a single fwrite/fread. */
typedef struct stu_unit2 {
	double w1[NETX][NETL];	// input layer: NETX units x NETL inputs
	double w2[NETX][NETX];	// hidden layer 2
	double w3[NETX][NETX];	// hidden layer 3
	double w4[NETX];	// output layer
}unit2;

/* Initialize every network weight with a uniform random value in [-1, 1].
   Call srand() beforehand for reproducible initialization.
   Fix: the w3 loop previously ran j < 5, leaving columns 5..NETX-1 of w3
   uninitialized (undefined values fed into training); it now covers the
   full NETX width like the w2 loop. */
void ninit(unit2 &inet)
{
	int  i, j;
	for (i = 0;i < NETX;i++)
		for (j = 0;j < NETL;j++)
			inet.w1[i][j] = (2.0*rand() / RAND_MAX) - 1.0;
	for (i = 0;i < NETX;i++)
		for (j = 0;j < NETX;j++)
			inet.w2[i][j] = (2.0*rand() / RAND_MAX) - 1.0;
	for (i = 0;i < NETX;i++)
		for (j = 0;j < NETX;j++)	// was j < 5: left most of w3 uninitialized
			inet.w3[i][j] = (2.0*rand() / RAND_MAX) - 1.0;
	for (i = 0;i < NETX;i++)
		inet.w4[i] = (2.0*rand() / RAND_MAX) - 1.0;
}

/*	随机梯度下降训练	*/
double strain2(unit2 inet,double *in[NETL],double *expect,const int exdim)
{
	double los;
	int co;
	co = rand() % exdim;
	los=tra2(in[co], inet.w1, inet.w2, inet.w3, inet.w4, expect[co]);
	return los;
}

/* Train once on every sample of the set, in order.
   inet:   network to train, updated in place
   in:     exdim samples of NETL values each
   expect: target outputs, parallel to in
   exdim:  number of samples
   Returns the mean absolute post-update error (0.0 for an empty set).
   Fixes: drops the manual new[]/delete[] error buffer (the per-sample
   errors were only ever summed), and guards exdim <= 0, which previously
   divided by zero. */
double dtrain2(unit2 &inet, double in[][NETL], double *expect,  int exdim)
{
	if (exdim <= 0)
		return 0.0;	// empty training set: nothing to do, avoid division by zero
	double sum = 0.0;
	for (int i = 0; i < exdim; i++)
		sum += fabs(tra2(in[i], inet.w1, inet.w2, inet.w3, inet.w4, expect[i]));
	return sum / (double)exdim;
}

/*	输出网络	*/
double ncal(unit2 inet, double in[NETL])
{
	return calo2(in, inet.w1, inet.w2, inet.w3, inet.w4);
}

#define SAVEPATH "net.dat"

/*	储存网络	*/
void nsave(unit2 inet)
{
	FILE *pFile = fopen(SAVEPATH, "wb");
	fwrite(&inet, sizeof(unit2), 1, pFile);
	fclose(pFile);
}

/* Load the network weights from SAVEPATH (binary record written by nsave).
   On failure inet is left unchanged (or partially read on a short read).
   Fixes: a failed fopen previously caused a NULL-pointer dereference,
   and the fread result was ignored; both are now reported to stderr. */
void nload(unit2 &inet)
{
	FILE *fp = fopen(SAVEPATH, "rb");
	if (fp == NULL)
	{
		fprintf(stderr, "nload: cannot open %s for reading\n", SAVEPATH);
		return;
	}
	if (fread(&inet, sizeof(unit2), 1, fp) != 1)
		fprintf(stderr, "nload: short read from %s\n", SAVEPATH);
	fclose(fp);
}

#endif


// (CSDN page-scraping residue — comment box and "red packet" payment-widget
//  text — removed; it was not part of the original source code.)