void calc_activ_func( Mat& sums, const Mat& w ) const
{
const double* bias = w.ptr<double>(w.rows-1);
int i, j, n = sums.rows, cols = sums.cols;
double scale = 0, scale2 = f_param2;
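// First phase: choose the scale applied to the weighted sums before cv::exp below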
switch( activ_func )
{
case IDENTITY:
scale = 1.;
break;
case SIGMOID_SYM:
scale = -f_param1;
break;
case GAUSSIAN:
scale = -f_param1*f_param1;
break;
default:
;
}
CV_Assert( sums.isContinuous() );
if( activ_func != GAUSSIAN )
{
for( i = 0; i < n; i++ )
{
double* data = sums.ptr<double>(i);
for( j = 0; j < cols; j++ )
data[j] = (data[j] + bias[j])*scale; // data[j] is the inner product of the Weight[LayerIndex][j] array with LayerIn
}
if( activ_func == IDENTITY )
return;
}
else
{
for( i = 0; i < n; i++ )
{
double* data = sums.ptr<double>(i);
for( j = 0; j < cols; j++ )
{
double t = data[j] + bias[j];
data[j] = t*t*scale;
}
}
}
exp( sums, sums ); // e raised to the power sums(I), element-wise; note scale is negative
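// If sums is continuous in memory, fold all rows into one row so the loops below run in a single pass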
if( sums.isContinuous() )
{
cols *= n;
n = 1;
}
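// Second phase: map the exponentials to the final activation values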
switch( activ_func )
{
case SIGMOID_SYM:
for( i = 0; i < n; i++ )
{
double* data = sums.ptr<double>(i);
for( j = 0; j < cols; j++ )
{
if(!cvIsInf(data[j]))
{
double t = scale2*(1. - data[j])/(1. + data[j]);
data[j] = t;
}
else
{
data[j] = -scale2;
}
}
}
break;
case GAUSSIAN:
for( i = 0; i < n; i++ )
{
double* data = sums.ptr<double>(i);
for( j = 0; j < cols; j++ )
data[j] = scale2*data[j];
}
break;
default:
;
}
}
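Unrolled for a single scalar input, the SIGMOID_SYM path above reduces to the following minimal sketch (plain standard C++, not the OpenCV implementation; alpha and beta stand in for f_param1 and f_param2). It also numerically checks the observation made just below, that the result equals beta*tanh(alpha*x/2):

#include <cmath>
#include <cstdio>

// Phase 1 of the OpenCV code: x -> exp(-alpha*x)   (scale = -f_param1, then cv::exp)
// Phase 2: e -> beta * (1 - e) / (1 + e)           (scale2 = f_param2)
double sigmoid_sym(double x, double alpha, double beta)
{
    double e = std::exp(-alpha * x);
    return beta * (1.0 - e) / (1.0 + e);
}

int main()
{
    const double alpha = 1.0, beta = 1.0; // example parameter values
    for (double x = -2.0; x <= 2.0; x += 1.0)
        std::printf("x=%5.1f  sigmoid_sym=%+.6f  beta*tanh(alpha*x/2)=%+.6f\n",
                    x, sigmoid_sym(x, alpha, beta),
                    beta * std::tanh(alpha * x / 2.0));
    return 0;
}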
Taken together with the code above, the activation function that ANN_MLP::SIGMOID_SYM actually implements is the hyperbolic tangent.
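The algebra behind this, writing α for f_param1 and β for f_param2:

f(x) = β·(1 − e^(−αx)) / (1 + e^(−αx)) = β·(e^(αx/2) − e^(−αx/2)) / (e^(αx/2) + e^(−αx/2)) = β·tanh(αx/2)

So SIGMOID_SYM is a scaled and stretched tanh rather than the standard logistic sigmoid.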
For reference: http://www.opencv.org.cn/opencvdoc/2.3.2/html/modules/ml/doc/neural_networks.html
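For completeness, a usage sketch (OpenCV 2.x C++ API, as in the linked docs; the 2-5-1 layer sizes and the alpha/beta values are arbitrary example choices, and the Mat overload of create is assumed to be available in your 2.x build):

#include <opencv2/ml/ml.hpp>

int main()
{
    // Three layers: 2 inputs, 5 hidden units, 1 output (example values)
    cv::Mat layerSizes = (cv::Mat_<int>(1, 3) << 2, 5, 1);
    CvANN_MLP mlp;
    // Select the symmetric sigmoid analyzed above; the last two
    // arguments are f_param1 (alpha) and f_param2 (beta)
    mlp.create(layerSizes, CvANN_MLP::SIGMOID_SYM, 1.0, 1.0);
    return 0;
}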