X-Git-Url: http://gitweb.michael.orlitzky.com/?p=octave.git;a=blobdiff_plain;f=optimization%2Fconjugate_gradient_method.m;h=a6401e5ec21ff963883368576deb59ff0e1b36d0;hp=2c94487a65508e876903343abf457cda853bb929;hb=5ef96eb244b2a52886a31fc9873db479f52a6483;hpb=6c0b2e3b90fad51bbbb14f4755127a7920fd59c4

diff --git a/optimization/conjugate_gradient_method.m b/optimization/conjugate_gradient_method.m
index 2c94487..a6401e5 100644
--- a/optimization/conjugate_gradient_method.m
+++ b/optimization/conjugate_gradient_method.m
@@ -8,8 +8,7 @@ function [x, k] = conjugate_gradient_method(A, b, x0, tolerance, max_iterations)
   %
   % min [phi(x) = (1/2)*<Ax,x> + <b,x>]
   %
-  % using the conjugate_gradient_method (Algorithm 5.2 in Nocedal and
-  % Wright).
+  % using the conjugate_gradient_method.
   %
   % INPUT:
   %
@@ -36,28 +35,15 @@ function [x, k] = conjugate_gradient_method(A, b, x0, tolerance, max_iterations)
   %
   % All vectors are assumed to be *column* vectors.
   %
-  zero_vector = zeros(length(x0), 1);
+  n = length(x0);
+  M = eye(n);
 
-  k = 0;
-  x = x0; % Eschew the 'k' suffix on 'x' for simplicity.
-  rk = A*x - b; % The first residual must be computed the hard way.
-  pk = -rk;
-
-  for k = [ 0 : max_iterations ]
-    if (norm(rk) < tolerance)
-      % Success.
-      return;
-    end
-
-    alpha_k = step_length_cgm(rk, A, pk);
-    x_next = x + alpha_k*pk;
-    r_next = rk + alpha_k*A*pk;
-    beta_next = (r_next' * r_next)/(rk' * rk);
-    p_next = -r_next + beta_next*pk;
-
-    k = k + 1;
-    x = x_next;
-    rk = r_next;
-    pk = p_next;
-  end
+  % The standard CGM is equivalent to the preconditioned CGM if you
+  % use the identity matrix as your preconditioner.
+  [x, k] = preconditioned_conjugate_gradient_method(A,
+                                                    M,
+                                                    b,
+                                                    x0,
+                                                    tolerance,
+                                                    max_iterations);
 end
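
For reference, a minimal sketch of how the refactored function might be exercised. The call signature and the delegation to preconditioned_conjugate_gradient_method come from the diff above; the test matrix, tolerance, and iteration cap are illustrative values only, and the sketch assumes preconditioned_conjugate_gradient_method.m is on the Octave load path.

  % A small symmetric positive-definite system; values are illustrative.
  A = [4, 1; 1, 3];
  b = [1; 2];
  x0 = zeros(2, 1);    % column vector, per the docstring

  [x, k] = conjugate_gradient_method(A, b, x0, 1e-10, 100);

  % The old loop exited once norm(A*x - b) < tolerance, so the residual
  % should be below the tolerance here; k reports the iteration count.
  disp(norm(A*x - b));

  % Calling the preconditioned version with M = eye(2) should return the
  % same result, which is exactly the equivalence the refactoring exploits.
  [y, j] = preconditioned_conjugate_gradient_method(A, eye(2), b, x0,
                                                    1e-10, 100);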