X-Git-Url: http://gitweb.michael.orlitzky.com/?p=octave.git;a=blobdiff_plain;f=optimization%2Fconjugate_gradient_method.m;h=2fab666b7794ccb1d583dcc56057e11362bb9307;hp=2c94487a65508e876903343abf457cda853bb929;hb=c40bbb2f7d9073193f107bbe252d4e9c8509a203;hpb=6c0b2e3b90fad51bbbb14f4755127a7920fd59c4

diff --git a/optimization/conjugate_gradient_method.m b/optimization/conjugate_gradient_method.m
index 2c94487..2fab666 100644
--- a/optimization/conjugate_gradient_method.m
+++ b/optimization/conjugate_gradient_method.m
@@ -8,8 +8,7 @@ function [x, k] = conjugate_gradient_method(A, b, x0, tolerance, max_iterations)
   %
   %   min [phi(x) = (1/2)*<Ax, x> + <b, x>]
   %
-  % using the conjugate_gradient_method (Algorithm 5.2 in Nocedal and
-  % Wright).
+  % using the conjugate_gradient_method.
   %
   % INPUT:
   %
@@ -36,28 +35,18 @@ function [x, k] = conjugate_gradient_method(A, b, x0, tolerance, max_iterations)
   %
   % All vectors are assumed to be *column* vectors.
   %
-  zero_vector = zeros(length(x0), 1);
-
-  k = 0;
-  x = x0; % Eschew the 'k' suffix on 'x' for simplicity.
-  rk = A*x - b; % The first residual must be computed the hard way.
-  pk = -rk;
-
-  for k = [ 0 : max_iterations ]
-    if (norm(rk) < tolerance)
-      % Success.
-      return;
-    end
-
-    alpha_k = step_length_cgm(rk, A, pk);
-    x_next = x + alpha_k*pk;
-    r_next = rk + alpha_k*A*pk;
-    beta_next = (r_next' * r_next)/(rk' * rk);
-    p_next = -r_next + beta_next*pk;
+  % The rather verbose name of this function was chosen to avoid
+  % conflicts with other implementations.
+  %
+  n = length(x0);
+  M = eye(n);
 
-    k = k + 1;
-    x = x_next;
-    rk = r_next;
-    pk = p_next;
-  end
+  % The standard CGM is equivalent to the preconditioned CGM if you
+  % use the identity matrix as your preconditioner.
+  [x, k] = preconditioned_conjugate_gradient_method(A, ...
+                                                    M, ...
+                                                    b, ...
+                                                    x0, ...
+                                                    tolerance, ...
+                                                    max_iterations);
 end
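
A quick sanity check may help illustrate the change: since the standard CGM is just the preconditioned CGM with the identity matrix as preconditioner, the rewritten function should behave exactly as the deleted loop did. The sketch below is hypothetical usage, not part of the commit; it assumes conjugate_gradient_method.m and preconditioned_conjugate_gradient_method.m are both on the Octave load path, and it follows the residual convention r = Ax - b from the replaced code, so the returned x approximately solves Ax = b.

% Hypothetical usage sketch (not part of this commit).
A = [5, 1; 1, 3];    % a small symmetric positive-definite matrix
b = [1; 2];
x0 = zeros(2, 1);    % arbitrary starting point
[x, k] = conjugate_gradient_method(A, b, x0, 1e-10, 100);

% In exact arithmetic, CGM on a 2x2 system converges in at most
% two iterations.
disp(norm(A*x - b)); % residual norm; expect a value below 1e-10
disp(k);             % number of iterations taken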