clear; clc;
format long;
tic
%% Open issues:
%% 1. The result is only correct when N1 equals N2, which should not be the case (resolved)
% Is the convergence criterion wrong? Is this really the only convergence criterion?
% Answer: after N1 was introduced, the N in the failure-probability formula had not been changed to N1.
%% 2. Why do some other examples I tried myself not work?
% Can the method only handle inputs that follow standard normal distributions? Probably not, since two test cases still gave acceptable results.
% Is the choice of K, N1 and N2 inappropriate for those problems?
% Is it an initialization problem in this code? Unlikely, since several examples from the paper were verified and the results are acceptable.
% The example itself may simply be unsuitable. This is the most likely cause, since that example is also hard to solve with design-point-based methods.
%% 3. When verifying with the examples from the paper, reasonable results are obtained, but why do both N and N1 have to be larger than the values used in the paper?
% Is it due to the initialization and the convergence criterion used in this code?
%% Side question: why does importance sampling not change the failure probability of the samples? (think this through yourself)
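% A note on the side question: importance sampling does not change the failure probability because the
% estimator reweights each sample by the likelihood ratio fx/hx. This is the standard identity (not
% specific to this code):
%   Pf = E_f[ I(g(X) <= 0) ] = integral I(g(x) <= 0) * f(x) dx
%      = integral I(g(x) <= 0) * (f(x)/h(x)) * h(x) dx = E_h[ I(g(X) <= 0) * f(X)/h(X) ],
% so drawing from h and averaging I .* fx ./ hx estimates the same Pf; only the estimator variance changes.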
forwhile = 10; % number of independent repetitions of the whole procedure
for zzzzz = 1 : forwhile
miu1 = [0.4 , 0.2 , 0.6 , 1500];
sigma1 = [0.04 , 0.02 , 0.06 , 150];
g = @(x) ninebox(x .* sigma1 + miu1);
% miu = [0.4 , 0.2 , 0.6 , 1500];
% sigma = [0.04 , 0.02 , 0.06 , 150];
miu = [0 , 0 , 0 , 0];
sigma = [1 , 1 , 1 , 1];
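% Note: everything below works in standard-normal space (miu = 0, sigma = 1); g maps a standard-normal
% sample x back to physical space, X = x .* sigma1 + miu1, before calling the limit-state function
% ninebox, with miu1 and sigma1 holding the means and standard deviations of the physical inputs.
% Minimal usage sketch (kept commented out so the run is unchanged; x0 is a hypothetical test point):
% x0 = zeros(1 , 4);          % a point in standard-normal space
% X0 = x0 .* sigma1 + miu1;   % the same point in physical space (here simply miu1)
% g0 = g(x0);                 % identical to ninebox(X0)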
NK = 20; % number of initial samples for the Kriging model
N = 400; % sample size per iteration of the adaptive (cross-entropy) loop
N1 = 9 * 10 ^ 4; % sample size for the final estimation after the iterations
K = 4; % number of cluster centers (Gaussian mixture components) from the cluster analysis
Nx = 9; % number of chunks the N1 samples are split into (predictions are done chunk by chunk)
%% The quantile (stratification) level is set to 20%
%% Cross-entropy importance sampling based on a Gaussian mixture model (applicable when the input variables are independent and identically distributed)
% Part I: failure probability
d = size(miu , 2);
% 1.1 Initialization
paix = ones(K , 1) .* (1 / K); % probability of selecting each individual Gaussian component (mixture weights)
for i = 1 : d
miux(: , i) = ones(K , 1) .* miu(: , i) + sigma(: , i) ./ 10 * rand(K , 1); % initial component means: the input mean randomly perturbed by up to sigma/10
end
Cx = cell(K , 1);
for i = 1 : K
Cx{i , 1} = diag(ones(1 , d)); % covariance matrix of each individual Gaussian component
end
% 1.2 Build fx, the joint PDF of the inputs
for i = 1 : d
% fxi = @(x) 1 / sqrt(2 * pi * sigma(i) ^ 2) * exp(-(x(: , i) - miu(i)) .^ 2 / (2 * sigma(i) ^ 2));
eval(['fx',num2str(i),' = @(x) 1 / sqrt(2 * pi * sigma(',num2str(i),') ^ 2) * exp(-(x(: , ',num2str(i),') - miu(',num2str(i),')) .^ 2 / (2 * sigma(',num2str(i),') ^ 2));'])
if i == 1
% fxi = @(x) fxi(x);
eval(['fx',num2str(i), '= @(x) fx',num2str(i),'(x);'])
else
% fxi = @(x) fxi(x) .* fx(i-1)(x);  note: element-wise multiplication
eval(['fx',num2str(i), '= @(x) fx',num2str(i),'(x) .* fx',num2str(i-1),'(x);'])
end
end
% fx = @(x) fxd(x);
eval(['fx = @(x) fx',num2str(d),'(x);'])
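% The eval loop above builds fx as the product of d univariate normal PDFs, i.e. the joint density of
% independent normal inputs: fx(x) = prod_i 1/sqrt(2*pi*sigma_i^2) * exp(-(x_i - mu_i)^2 / (2*sigma_i^2)).
% An equivalent vectorized sketch without eval (assumes implicit expansion, MATLAB R2016b or later; kept
% commented out so the behaviour of the script is unchanged):
% fx = @(x) prod(exp(-(x - miu) .^ 2 ./ (2 * sigma .^ 2)) ./ sqrt(2 * pi * sigma .^ 2) , 2);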
% 1.3 Cross-entropy iterations
b = 5; % initial threshold; any value > 0 enters the loop
Cir = 1;
jj = 0;
jjx = 0;
while b > 0
% 1.3.1 Build the mixture importance density hx_w
for i = 1 : K
% hxi_w = @(x) (2 * pi) ^ (- d / 2) * det(Cx{i , 1}) ^ (- 0.5) * exp(- 0.5 * sum(((x - miux(i , :)) * inv(Cx{i , 1}) .* (x - miux(i , :))) , 2));
eval(['hx',num2str(i),'_w = @(x) (2 * pi) ^ (- d / 2) * det(Cx{',num2str(i),' , 1}) ^ (- 0.5) * exp(- 0.5 * sum(((x - miux(',num2str(i),' , :)) * inv(Cx{',num2str(i),' , 1}) .* (x - miux(',num2str(i),' , :))) , 2));'])
if i == 1
hx_w = @(x) paix(i , 1) * hx1_w(x);
else
% hx_w = @(x) hx_w(x) + paix(i , 1) * hxi_w(x);
eval(['hx_w = @(x) hx_w(x) + paix(',num2str(i),' , 1) * hx',num2str(i),'_w(x);'])
end
end
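% hx_w is the Gaussian-mixture importance density, evaluated row-wise for a whole sample matrix x:
%   hx_w(x) = sum_{k=1..K} paix_k * (2*pi)^(-d/2) * det(Cx_k)^(-1/2) * exp(-0.5*(x - miux_k)*inv(Cx_k)*(x - miux_k)')
% Each component term equals mvnpdf(x , miux(k , :) , Cx{k , 1}) from the Statistics and Machine Learning
% Toolbox, which could replace the eval-built handles if that toolbox is available (an alternative sketch,
% not what the script currently relies on).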
% 1.3.2 Draw the samples (first apply the independence transform, then the inverse transform to obtain the sample points)
Xi = cell(K , 1);
sumx = N;
for i = 1 : K
if i == 1
Xi{i , 1} = mvnrnd(miux(i , :) , Cx{i , 1} , round(N * paix(i)));
sumx = sumx - round(N * paix(i));
X = Xi{i , 1};
elseif i > 1 && i < K
Xi{i , 1} = mvnrnd(miux(i , :) , Cx{i , 1} , round(N * paix(i)));
sumx = sumx - round(N * paix(i));
X = [X ; Xi{i , 1}];
else
Xi{i , 1} = mvnrnd(miux(i , :) , Cx{i , 1} , sumx);
X = [X ; Xi{i , 1}];
end
end
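% Sample allocation: components 1..K-1 each receive round(N * paix(k)) samples and the last component
% receives the remainder sumx, so the pooled matrix X always contains exactly N rows despite the rounding.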
if Cir == 1
[C , ia , ic] = unique(X(1 : NK , :) , 'rows' , 'stable');
XK_train = X(ia , :);
YK_train = g(XK_train);
first_XK = size(XK_train , 1);
for i = 1 : d
if sigma(i) >= 100
theta(i) = 0.01;
elseif (sigma(i) >= 1 && sigma(i) < 100)
theta(i) = 0.1;
else
theta(i) = 1;
end
end
lob = 1e-5 .* ones(1 , d);
upb = 100 .* ones(1 , d);
dmodel = dacefit(XK_train , YK_train , @regpoly0 , @corrgauss , theta , lob , upb);
end
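% dacefit/predictor together with regpoly0 and corrgauss come from the DACE Kriging toolbox (Lophaven,
% Nielsen and Søndergaard). The initial model is trained on the unique points among the first NK rows of X;
% the loop above is a heuristic that scales the starting correlation parameter theta with the magnitude of
% sigma, and lob/upb are the bounds handed to dacefit's internal optimisation of theta.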
Cr = 1;
jK = 1;
Xh_train = X;
while Cr < 2
if jK == 1
[ug , sigmag] = predictor(Xh_train , dmodel);
prxi = (abs(ug)) ./ sqrt(sigmag);
[PD1(jK) , Iz] = min(prxi);
Cr = min(prxi);
jK = jK + 1;
else
XK_train = [XK_train ; Xh_train(Iz , :)];
z = size(XK_train , 1) - 1;
[C , ia , ic] = unique(XK_train , 'rows' , 'stable');
XK_train = XK_train(ia , :);
if z ~= size(XK_train , 1)
YK_train = [YK_train ; g(Xh_train(Iz , :))];
dmodel = dacefit(XK_train , YK_train , @regpoly0 , @corrgauss , theta , lob , upb);
[ug , sigmag] = predictor(Xh_train , dmodel);
prxi = (abs(ug)) ./ sqrt(sigmag);
else
Xh_train = Xh_train([1 : Iz - 1 , Iz + 1 : end] , :);
prxi = prxi([1 : Iz - 1 , Iz + 1 : end] , :);
end
[PD1(jK) , Iz] = min(prxi);
Cr = min(prxi);
jK = jK + 1;
end
end
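% The enrichment loop above is a U-function (AK-MCS type) learning scheme: prxi = |mu_g| / sigma_g is the
% learning function U(x), the candidate with the smallest U is added to the training set, and learning
% stops once min U >= 2, i.e. when the probability that the Kriging model misclassifies the sign of g at
% any candidate is at most about normcdf(-2), roughly 2.3%. Candidates that duplicate an existing training
% point are removed from Xh_train instead of being re-evaluated.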
jj = jj + jK - 1 - (size(X , 1) - size(Xh_train , 1));
jjx = jjx + size(X , 1) - size(Xh_train , 1);
disp(['Kriging updates in this cycle: ' , num2str(jK - 1 - (size(X , 1) - size(Xh_train , 1))) , ', skipped in between: ' , num2str(size(X , 1) - size(Xh_train , 1))]);
% 1.3.3 Compute b and check convergence
g_Xh = predictor(X , dmodel);
b1 = sort(g_Xh);
b = b1(fix(N / 5)); % fix performs the integer division
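% b is the empirical 20% quantile (index fix(N / 5)) of the predicted responses, matching the quantile
% level announced above. While b > 0 the mixture is refitted towards the intermediate region {g <= b};
% once b <= 0, at least 20% of the current samples already fall in the predicted failure domain and the
% cross-entropy iteration stops.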
if b > 0
% 1.3.4 Compute the new cluster centers (mixture parameters)
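% With weights w_j = I(g_j <= b) * fx(x_j) / hx_w(x_j) and responsibilities gamma(j , k) (the posterior
% probability that sample j was generated by component k), the updates below are weighted EM-style
% cross-entropy estimates:
%   paix_k = sum_j w_j * gamma_jk / sum_j w_j
%   miux_k = sum_j w_j * gamma_jk * x_j / sum_j w_j * gamma_jk
%   Cx_k   = sum_j w_j * gamma_jk * (x_j - miux_k)' * (x_j - miux_k) / sum_j w_j * gamma_jk
% (the covariance update below uses the deviations from the current means miux, i.e. the values before
% this parameter update).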
Ix = g_Xh <= b;
gamma = zeros(N , K);
for i = 1 : K
% gamma(: , i) = paix(i , 1) .* hxi_w(X) ./ hx_w(X);
eval(['gamma(: , ',num2str(i),') = paix(',num2str(i),' , 1) .* hx',num2str(i),'_w(X) ./ hx_w(X);'])
end
paix_new = (sum(Ix .* fx(X) ./ hx_w(X) .* gamma)) ./ (sum(Ix .* fx(X) ./ hx_w(X)));
for i = 1 : K
miux_new(i , :) = (sum(Ix .* fx(X) ./ hx_w(X) .* gamma(: , i) .* X)) ./ (sum(Ix .* fx(X) ./ hx_w(X) .* gamma(: , i)));
end
Cx_new = cell(K , 1);
z = fx(X) ./ hx_w(X);
for i = 1 : K
Cx_new{i , 1} = zeros(d , d);
for j = 1 : N
Cx_new{i , 1} = Cx_new{i , 1} + (Ix(j) .* z(j) .* gamma(j , i) .* (X(j , :) - miux(i , :))' * (X(j , :) - miux(i , :))) ./ (sum(Ix .* fx(X) ./ hx_w(X) .* gamma(: , i)));
end
end
paix = paix_new';
miux = miux_new;
Cx = Cx_new;
Cir = Cir + 1;
end
end
% 1.4 Compute the failure probability
sun_sample = Cir * N % total number of samples used by the iterations
% 1.4.1 Redraw samples from the final mixture
Xi = cell(K , 1);
sumx = N1;
for i = 1 : K
if i == 1
Xi{i , 1} = mvnrnd(miux(i , :) , Cx{i , 1} , round(N1 * paix(i)));
sumx = sumx - round(N1 * paix(i));
X = Xi{i , 1};
elseif i > 1 && i < K
Xi{i , 1} = mvnrnd(miux(i , :) , Cx{i , 1} , round(N1 * paix(i)));
sumx = sumx - round(N1 * paix(i));
X = [X ; Xi{i , 1}];
else
Xi{i , 1} = mvnrnd(miux(i , :) , Cx{i , 1} , sumx);
X = [X ; Xi{i , 1}];
end
end
% 1.4.2 Update the Kriging model once more
X1 = X;
for i = 1 : (N1 + 1)
if i == 1
XX_Nx = fix(size(X1 , 1) / Nx);
for XXXX = 1 : Nx
if XXXX ~= Nx
XXX = X1((XX_Nx * (XXXX - 1) + 1) : (XX_Nx * XXXX) , :);
else
XXX = X1((XX_Nx * (XXXX - 1) + 1) : end , :);
end
[ug1 , sigmag1] = predictor(XXX , dmodel);
if XXXX == 1
ug = ug1;
sigmag = sigmag1;
else
ug = [ug ; ug1];
sigmag = [sigmag ; sigmag1];
end
end
prxi = (abs(ug)) ./ sqrt(sigmag);
[PD(i) , Iz] = min(prxi);
else
XK_train = [XK_train ; X1(Iz , :)];
z = size(XK_train , 1) - 1;
[C , ia , ic] = unique(XK_train(: , :) , 'rows' , 'stable');
XK_train = XK_train(ia , :);
if z ~= size(XK_train , 1)
YK_train = [YK_train ; g(X1(Iz , :))];
dmodel = dacefit(XK_train , YK_train , @regpoly0 , @corrgauss , theta , lob , upb);
XX_Nx = fix(size(X1 , 1) / Nx);
for XXXX = 1 : Nx
if XXXX ~= Nx
XXX = X1((XX_Nx * (XXXX - 1) + 1) : (XX_Nx * XXXX) , :);
else
XXX = X1((XX_Nx * (XXXX - 1) + 1) : end , :);
end
[ug1 , sigmag1] = predictor(XXX , dmodel);
if XXXX == 1
ug = ug1;
sigmag = sigmag1;
else
ug = [ug ; ug1];
sigmag = [sigmag ; sigmag1];
end
end
prxi = (abs(ug)) ./ sqrt(sigmag);
else
X1 = X1([1 : Iz - 1 , Iz + 1 : end] , :);
prxi = prxi([1 : Iz - 1 , Iz + 1 : end] , :);
end
[PD(i) , Iz] = min(prxi);
end
Cr(i) = min(prxi);
i % display the current enrichment step
if Cr(i) >= 2
disp(['Kriging updates in the final sampling: ' , num2str(i - 1 - (size(X , 1) - size(X1 , 1))) , ', skipped in between: ' , num2str(size(X , 1) - size(X1 , 1))]);
break
end
end
if i == N1 + 1
disp(['Kriging updates in the final sampling exceeded the limit: ' , num2str(i - 1 - (size(X , 1) - size(X1 , 1))) , ', Cr = ' , num2str(Cr(i)) , ', skipped in between: ' , num2str(size(X , 1) - size(X1 , 1))]);
end
disp(['Initial Kriging sample size: ' , num2str(first_XK) , ', Kriging updates during the whole procedure: ' , num2str(jj + i - 1 - (size(X , 1) - size(X1 , 1))) , ', skipped in between: ' , num2str(jjx + size(X , 1) - size(X1 , 1))]);
zzzzz1 = jj;
zzzzz2 = i - 1 - (size(X , 1) - size(X1 , 1));
% 1.4.3 Compute the failure probability and its coefficient of variation
X_Nx = fix(size(X , 1) / Nx);
for XXXX = 1 : Nx
if XXXX ~= Nx
XXX = X((X_Nx * (XXXX - 1) + 1) : (X_Nx * XXXX) , :);
else
XXX = X((X_Nx * (XXXX - 1) + 1) : end , :);
end
g_X1 = predictor(XXX , dmodel);
if XXXX == 1
g_X = g_X1;
else
g_X = [g_X ; g_X1];
end
end
I = g_X <= 0;
hx = @(x) hx_w(x);
Pf = I' * (fx(X) ./ hx(X)) / N1
Var_Pf = ((I' * (fx(X) .^ 2 ./ hx(X) .^ 2)) / N1 - Pf ^ 2) / (N1 - 1);
Cov_Pf = sqrt(Var_Pf) / Pf
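% Importance-sampling estimate of the failure probability, with the fitted mixture hx as sampling density:
%   Pf     = (1/N1) * sum_j I_j * fx(x_j)/hx(x_j)
%   Var_Pf = ( (1/N1) * sum_j I_j * (fx(x_j)/hx(x_j))^2 - Pf^2 ) / (N1 - 1)
%   Cov_Pf = sqrt(Var_Pf) / Pf
% which is exactly what the three lines above compute; I is the vector of predicted failure indicators g_X <= 0.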
% Part II: local sensitivities
N = N1;
f_h = I .* fx(X) ./ hx(X);
% Note: miu here is the mean of the original variables and has nothing to do with the design point
x_miu1 = (X - (miu' * ones(1 , N))') ./ (sigma' * ones(1 , N))' .^ 2;
x_miu2 = (X - (miu' * ones(1 , N))') .^ 2 ./ (sigma' * ones(1 , N))' .^ 2;
% 2.1 With respect to the means
% 2.1.1 Local sensitivity with respect to the means
SFp_miu = f_h' * x_miu1 / N;
% 2.1.2 Variance of the local sensitivity with respect to the means
% The partial derivative of fx with respect to miu has a closed-form expression that is coded directly; note that the variables are mutually independent and normally distributed
Var_SFp_miu = ((I ./ hx(X) .* fx(X))' .^ 2 * (x_miu2 ./ (sigma' * ones(1 , N))' .^ 2) / N - SFp_miu .^ 2) / (N - 1);
% 2.1.3 Coefficient of variation of the local sensitivity with respect to the means
Cov_SFp_miu = sqrt(Var_SFp_miu) ./ abs(SFp_miu);
% 2.2 With respect to the standard deviations
% 2.2.1 Local sensitivity with respect to the standard deviations
SFp_sigma = f_h' * (x_miu2 - 1) ./ sigma / N;
% 2.2.2 Variance of the local sensitivity with respect to the standard deviations
Var_SFp_sigma = (f_h' .^ 2 * (x_miu2 - 1) .^ 2 ./ sigma .^ 2 / N - SFp_sigma .^ 2) / (N - 1);
% 2.2.3 Coefficient of variation of the local sensitivity with respect to the standard deviations
Cov_SFp_sigma = sqrt(Var_SFp_sigma) ./ abs(SFp_sigma);
SFp_miu = SFp_miu ./ sigma1
SFp_sigma = SFp_sigma ./ sigma1
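% The two estimates above are the standard score-function (likelihood-ratio) sensitivities for independent
% normal inputs, reusing the importance-sampling weights f_h = I .* fx ./ hx:
%   dPf/dmu_i    ~ (1/N) * sum_j f_h_j * (x_ji - mu_i) / sigma_i^2                    (SFp_miu)
%   dPf/dsigma_i ~ (1/N) * sum_j f_h_j * ((x_ji - mu_i)^2 / sigma_i^2 - 1) / sigma_i  (SFp_sigma)
% Because sampling is done in standard-normal space while X_phys = x .* sigma1 + miu1, the division by
% sigma1 just above applies the chain rule that converts these derivatives into sensitivities with respect
% to the physical means and standard deviations.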
if zzzzz == 1
sun_samplex = sun_sample;
zzzzz11 = zzzzz1;
zzzzz22 = zzzzz2;
SPf = Pf;
SPff = Cov_Pf;
SFp_miu11 = SFp_miu;
SFp_sigma11 = SFp_sigma;
else
sun_samplex = [sun_samplex ; sun_sample];
zzzzz11 = [zzzzz11 ; zzzzz1];
zzzzz22 = [zzzzz22 ; zzzzz2];
SPf = [SPf ; Pf];
SPff = [SPff ; Cov_Pf];
SFp_miu11 = [SFp_miu11 ; SFp_miu];
SFp_sigma11 = [SFp_sigma11 ; SFp_sigma];
end
zzzzz % display the current repetition
end
E_SPf = mean(SPf);
E_miu = mean(SFp_miu11);
E_sigma = mean(SFp_sigma11);
V_SPf = (sum(SPf .^ 2) - mean(SPf) .^ 2 .* forwhile) ./ (forwhile - 1);
V_miu = (sum(SFp_miu11 .^ 2) - mean(SFp_miu11) .^ 2 .* forwhile) ./ (forwhile - 1);
V_sigma = (sum(SFp_sigma11 .^ 2) - mean(SFp_sigma11) .^ 2 .* forwhile) ./ (forwhile - 1);
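% E_* and V_* summarise the forwhile independent repetitions: E_* is the mean over the runs and V_* the
% unbiased sample variance, V = (sum(x .^ 2) - n * mean(x) ^ 2) / (n - 1), so each C_* below is the
% run-to-run coefficient of variation of the corresponding estimate.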
mean(sun_samplex)
mean(zzzzz11)
mean(zzzzz22)
E_SPf
C_SPf = sqrt(V_SPf) ./ abs(E_SPf)
E_miu
C_miu = sqrt(V_miu) ./ abs(E_miu)
E_sigma
C_sigma = sqrt(V_sigma) ./ abs(E_sigma)
toc
%% Question: what is the purpose of g = @(x) ninebox(x .* sigma1 + miu1); in this program?
% It converts the standard-normal samples x used throughout this script back into the physical space,
% X = x .* sigma1 + miu1, before evaluating the limit-state function ninebox (see the note where g is defined).