Following up on the previous post, this one covers the other three algorithms. They all rely on the inexact one-dimensional line search written in the previous post, linked here: 大连理工大学 2021年最优化方法大作业(1)_JiangTesla的博客-CSDN博客
The next problem is here: 大连理工大学2021最优化方法大作业(3)_JiangTesla的博客-CSDN博客
A small supplement for the 2022 problems: 大连理工大学2022上半年最优化方法大作业_Jiang_Tesla的博客-CSDN博客
1. Newton's Method
Newton's method iterates in a very simple, brute-force way: no one-dimensional search is needed; you just multiply the gradient by the inverse of the Hessian. The flowchart logic is shown below (the diagram is slightly off: the loop arrow should point to the top of the diamond, but you get the idea).
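Concretely, each step applies the standard Newton update (exactly what the code below does), stopping once $\|\nabla f(x_k)\| \leq \varepsilon$:

$$x_{k+1} = x_k - \left[\nabla^2 f(x_k)\right]^{-1} \nabla f(x_k)$$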
Straight to the code:
% the next three lines are the input
x = [0;0];
eps = 0.0001;
start_newton(x,eps);
% the objective function from the problem
function f = fun(x)
    f = 10*(x(1)-1)^2 + (x(2)+1)^4;
end
% Hessian of the objective function
function h = hesse(x)
    h = zeros(2,2);
    h(1,1) = 20;             % second derivative of 10*(x(1)-1)^2
    h(1,2) = 0;
    h(2,1) = 0;
    h(2,2) = 12*(x(2)+1)^2;  % second derivative of (x(2)+1)^4
end
% gradient of the objective function
function g = grad(x)
    g = zeros(2,1);
    g(1) = 20*(x(1)-1);
    g(2) = 4*(x(2)+1)^3;
end
% Newton iteration
function start_newton(x0,eps)
    gk = grad(x0);
    res = norm(gk);
    k = 0;
    while res > eps
        fprintf('The %d-th iteration, the residual is %f\n',k,res);
        fprintf('x=[%f,%f],min(f):%f\n',x0(1),x0(2),fun(x0));
        fprintf('**********************************************\n');
        x0 = x0 - hesse(x0)\gk;  % solve H*d = g instead of forming inv(H) explicitly
        k = k+1;
        gk = grad(x0);
        res = norm(gk);
    end
    fprintf('The %d-th iteration, the residual is %f\n',k,res);
    fprintf('x=[%f,%f],min(f):%f\n',x0(1),x0(2),fun(x0));
end
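An analytic gradient or Hessian is easy to get wrong, so here is a small optional sanity check that compares grad against central finite differences; the test point and step size are made up for illustration. Paste it above the function definitions if you want to run it:

% hypothetical sanity check: compare grad() with central finite differences
xt = [0.3; -0.2];   % arbitrary test point
fd = 1e-5;          % finite-difference step
g_fd = zeros(2,1);
for i = 1:2
    e = zeros(2,1); e(i) = fd;
    g_fd(i) = (fun(xt+e) - fun(xt-e)) / (2*fd);
end
fprintf('gradient check error: %e\n', norm(g_fd - grad(xt)));  % should be near zero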
2. Conjugate Gradient Method
The textbook already has a flowchart for this one, so I won't redraw it.

As you can see, the conjugate gradient method generates its search directions one at a time. For an n-dimensional problem there are only n conjugate directions, so after n steps the method restarts from x_n and generates a fresh set of conjugate directions. The code below is the core of the method; fun(x), the gradient function, and the one-dimensional search function (大连理工大学 2021年最优化方法大作业(1)_JiangTesla的博客-CSDN博客) are the same as before, so just copy and paste them into the same file. I won't keep repeating the shared code.
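For reference, the direction update inside the loop is the Fletcher-Reeves rule (the coefficient $\beta_k$ is the `v` in the code):

$$\beta_k = \frac{\|g_{k+1}\|^2}{\|g_k\|^2}, \qquad s_{k+1} = -g_{k+1} + \beta_k s_k$$

And a minimal driver sketch, assuming fun, gradient, and wolfe_powell from the previous post are pasted into the same file (these three lines go at the top, above the function definitions):

x0 = [0;0];                          % starting point
eps = 1e-4;                          % tolerance on the gradient norm
start_conjungate_gradient(x0, eps);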
function start_conjungate_gradient(x0, eps)
    n = 2;                         % two-dimensional problem, so n = 2
    g0 = gradient(x0);             % user-defined gradient (returns a row vector)
    s0 = -(g0.');
    k = 0;
    count = 0;                     % iteration counter
    lambda = wolfe_powell(x0,s0);  % the one-dimensional search from the previous post
    x1 = x0 + lambda*s0;
    g1 = gradient(x1);
    while (norm(g1) > eps)
        if k < n-1                 % have we used up all n conjugate directions yet?
            v = (norm(g1))^2/(norm(g0)^2);  % Fletcher-Reeves coefficient
            s1 = -(g1.') + v*s0;            % transpose: gradient() returns a row vector
            k = k+1;
            x0 = x1;
            g0 = gradient(x0);
            s0 = s1;
            lambda = wolfe_powell(x0,s0);
            x1 = x0 + lambda*s0;
            g1 = gradient(x1);
        else                       % restart from the current point with steepest descent
            x0 = x1;
            g0 = gradient(x0);
            s0 = -(g0.');
            lambda = wolfe_powell(x0,s0);
            x1 = x0 + lambda*s0;
            g1 = gradient(x1);
            k = 0;
        end
        count = count+1;
        fprintf('The %d-th iteration, the residual is %f\n',count,norm(g1));
        fprintf('x=[%f,%f],min(f):%f\n',x0(1),x0(2),fun(x0));
        fprintf('**********************************************\n');
    end
    fprintf('The %d-th iteration, the residual is %f\n',count,norm(g1));
    fprintf('x=[%f,%f],min(f):%f\n',x0(1),x0(2),fun(x0));
end
3. BFGS
Flowchart first:
I won't type out the formula for H(k+1) here; it's in the textbook on p. 137.
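For reference, the update the helper below implements is the standard BFGS formula for the inverse-Hessian approximation, with $\Delta x_k = x_{k+1} - x_k$ and $\Delta g_k = g_{k+1} - g_k$:

$$H_{k+1} = H_k + \frac{\left(1 + \frac{\Delta g_k^\top H_k \Delta g_k}{\Delta x_k^\top \Delta g_k}\right)\Delta x_k \Delta x_k^\top - H_k \Delta g_k \Delta x_k^\top - \Delta x_k \Delta g_k^\top H_k}{\Delta x_k^\top \Delta g_k}$$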
Because the formula for H(k+1) is fairly involved, I first wrote a small helper function to compute it:
function hk = get_hk(h,x,g)  % x and g are column vectors (delta-x and delta-g)
    miu = 1 + g.'*h*g/(x.'*g);
    fenzi = miu*x*x.' - h*g*x.' - x*g.'*h;  % numerator of the update term
    hk = h + fenzi/(x.'*g);
end
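A quick way to convince yourself the helper is correct: every BFGS inverse update must satisfy the secant condition $H_{k+1}\,\Delta g_k = \Delta x_k$. A minimal, hypothetical check with made-up test vectors:

% hypothetical check of the secant condition h1*dg == dx
h0 = eye(2);
dx = [0.5; -0.2];   % made-up delta-x
dg = [1.0;  0.3];   % made-up delta-g
h1 = get_hk(h0, dx, dg);
fprintf('secant condition error: %e\n', norm(h1*dg - dx));  % should be ~1e-16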
The core code follows:
function start_bfgs(x0, eps)
    n = 2;                         % two-dimensional, so n = 2
    g0 = gradient(x0);
    h0 = eye(2,2);                 % start from the identity matrix
    s0 = -h0*g0.';
    k = 0;
    count = 0;
    lambda = wolfe_powell(x0,s0);
    x1 = x0 + lambda*s0;
    g1 = gradient(x1);
    while (norm(g1) > eps)
        if k < n-1
            detax = x1 - x0;               % the next few lines compute H(k+1)
            detag = g1.' - g0.';
            h1 = get_hk(h0,detax,detag);   % the helper defined above
            s1 = -h1*g1.';
            k = k+1;
            x0 = x1;
            g0 = gradient(x0);
            s0 = s1;
            h0 = h1;
            lambda = wolfe_powell(x0,s0);
            x1 = x0 + lambda*s0;
            g1 = gradient(x1);
        else                               % restart: reset H to the identity
            x0 = x1;
            g0 = gradient(x0);
            h0 = eye(2,2);
            s0 = -h0*g0.';
            lambda = wolfe_powell(x0,s0);
            x1 = x0 + lambda*s0;
            g1 = gradient(x1);
            k = 0;
        end
        count = count+1;
        fprintf('The %d-th iteration, the residual is %f\n',count,norm(g1));
        fprintf('x=[%f,%f],min(f):%f\n',x0(1),x0(2),fun(x0));
        fprintf('**********************************************\n');
    end
    fprintf('The %d-th iteration, the residual is %f\n',count,norm(g1));
    fprintf('x=[%f,%f],min(f):%f\n',x1(1),x1(2),fun(x1));
end
Finally, here is the complete BFGS program in one file, so you can reassemble the other methods from the same pieces. For this objective, every method should converge to the exact minimizer x* = [1, -1] with f(x*) = 0, which is a handy sanity check.
x0 = [0;0];
eps = 1e-4;
start_bfgs(x0, eps);
function start_bfgs(x0, eps)
    n = 2;                         % two-dimensional, so n = 2
    g0 = gradient(x0);
    h0 = eye(2,2);                 % start from the identity matrix
    s0 = -h0*g0.';
    k = 0;
    count = 0;
    lambda = wolfe_powell(x0,s0);
    x1 = x0 + lambda*s0;
    g1 = gradient(x1);
    while (norm(g1) > eps)
        if k < n-1
            detax = x1 - x0;               % the next few lines compute H(k+1)
            detag = g1.' - g0.';
            h1 = get_hk(h0,detax,detag);   % the helper defined below
            s1 = -h1*g1.';
            k = k+1;
            x0 = x1;
            g0 = gradient(x0);
            s0 = s1;
            h0 = h1;
            lambda = wolfe_powell(x0,s0);
            x1 = x0 + lambda*s0;
            g1 = gradient(x1);
        else                               % restart: reset H to the identity
            x0 = x1;
            g0 = gradient(x0);
            h0 = eye(2,2);
            s0 = -h0*g0.';
            lambda = wolfe_powell(x0,s0);
            x1 = x0 + lambda*s0;
            g1 = gradient(x1);
            k = 0;
        end
        count = count+1;
        fprintf('The %d-th iteration, the residual is %f\n',count,norm(g1));
        fprintf('x=[%f,%f],min(f):%f\n',x0(1),x0(2),fun(x0));
        fprintf('**********************************************\n');
    end
    fprintf('The %d-th iteration, the residual is %f\n',count,norm(g1));
    fprintf('x=[%f,%f],min(f):%f\n',x1(1),x1(2),fun(x1));
end
function lamda = wolfe_powell(xk,dk)
    c1 = 0.1; c2 = 0.5;
    a = 0; b = Inf;
    lamda = 1;
    while(1)
        if ~(fun(xk+lamda*dk)-fun(xk) <= c1*lamda*gradient(xk)*dk)
            % sufficient-decrease condition failed: shrink the step
            b = lamda;
            lamda = (lamda + a)/2;
            continue;
        end
        if ~(gradient(xk+lamda*dk)*dk >= c2*gradient(xk)*dk)
            % curvature condition failed: enlarge the step
            a = lamda;
            lamda = min([2*lamda,(b+lamda)/2]);
            continue;
        end
        break;
    end
end
function f = fun(x)
    f = 10*(x(1)-1)^2 + (x(2)+1)^4;
end
function g = gradient(x)  % note: returns a row vector
    g = zeros(1,2);
    g(1) = 20*(x(1)-1);
    g(2) = 4*(x(2)+1)^3;
end
function hk = get_hk(h,x,g)  % x and g are column vectors (delta-x and delta-g)
    miu = 1 + g.'*h*g/(x.'*g);
    fenzi = miu*x*x.' - h*g*x.' - x*g.'*h;  % numerator of the update term
    hk = h + fenzi/(x.'*g);
end