From 131e7ef4f30e03286e4a09fc77730a0ca91e7a22 Mon Sep 17 00:00:00 2001
From: Michael Orlitzky
Date: Wed, 20 Mar 2013 00:26:44 -0400
Subject: [PATCH] Revert "Commit a simpler version of the preconditioned CGM."

This reverts commit af2083885af78b1290c21f2852c6fdba25820918,
restoring the implementation that works with ``M`` directly rather
than forming the Cholesky factor ``C`` and its explicit inverses.
---
 ...preconditioned_conjugate_gradient_method.m | 68 +++++++++++++------
 1 file changed, 49 insertions(+), 19 deletions(-)

diff --git a/optimization/preconditioned_conjugate_gradient_method.m b/optimization/preconditioned_conjugate_gradient_method.m
index af70af5..1442194 100644
--- a/optimization/preconditioned_conjugate_gradient_method.m
+++ b/optimization/preconditioned_conjugate_gradient_method.m
@@ -1,4 +1,4 @@
-function [x, k] = preconditioned_conjugate_gradient_method(Q,
+function [x, k] = preconditioned_conjugate_gradient_method(A,
                                                             M,
                                                             b,
                                                             x0,
@@ -7,29 +7,31 @@ function [x, k] = preconditioned_conjugate_gradient_method(Q,
   %
   % Solve,
   %
-  %   Qx = b
+  %   Ax = b
   %
   % or equivalently,
   %
-  %   min [phi(x) = (1/2)*<Qx,x> - <b,x>]
+  %   min [phi(x) = (1/2)*<Ax,x> - <b,x>]
   %
-  % using the preconditioned conjugate gradient method (14.54 in
-  % Guler).
+  % using the preconditioned conjugate gradient method (14.56 in
+  % Guler). If ``M`` is the identity matrix, we use the slightly
+  % faster implementation in conjugate_gradient_method.m.
   %
   % INPUT:
   %
-  %   - ``Q`` -- The coefficient matrix of the system to solve. Must
+  %   - ``A`` -- The coefficient matrix of the system to solve. Must
   %     be positive definite.
   %
   %   - ``M`` -- The preconditioning matrix. If the actual matrix used
-  %     to precondition ``Q`` is called ``C``, i.e. ``C^(-1) * Q *
-  %     C^(-T) == \bar{Q}``, then M=CC^T.
+  %     to precondition ``A`` is called ``C``, i.e. ``C^(-1) * A *
+  %     C^(-T) == \bar{A}``, then M=CC^T. However the matrix ``C`` is
+  %     never itself needed. This is explained in Guler, section 14.9.
   %
   %   - ``b`` -- The right-hand-side of the system to solve.
   %
   %   - ``x0`` -- The starting point for the search.
   %
-  %   - ``tolerance`` -- How close ``Qx`` has to be to ``b`` (in
+  %   - ``tolerance`` -- How close ``Ax`` has to be to ``b`` (in
   %     magnitude) before we stop.
   %
   %   - ``max_iterations`` -- The maximum number of iterations to
@@ -37,7 +39,7 @@ function [x, k] = preconditioned_conjugate_gradient_method(Q,
   %
   % OUTPUT:
   %
-  %   - ``x`` - The solution to Qx=b.
+  %   - ``x`` - The solution to Ax=b.
   %
   %   - ``k`` - The ending value of k; that is, the number of
   %     iterations that were performed.
@@ -46,21 +48,49 @@ function [x, k] = preconditioned_conjugate_gradient_method(Q,
   %
   % All vectors are assumed to be *column* vectors.
   %
+  % The cited algorithm contains a typo; in "The Preconditioned
+  % Conjugate-Gradient Method", we are supposed to define
+  % d_{0} = -z_{0}, not -r_{0} as written.
+  %
   % REFERENCES:
   %
   %   1. Guler, Osman. Foundations of Optimization. New York, Springer,
   %      2010.
   %
+  n = length(x0);
+
+  if (isequal(M, eye(n)))
+    [x, k] = conjugate_gradient_method(A, b, x0, tolerance, max_iterations);
+    return;
+  end
+
+  zero_vector = zeros(n, 1);
+
+  k = 0;
+  x = x0; % Eschew the 'k' suffix on 'x' for simplicity.
+  rk = A*x - b; % The first residual must be computed the hard way.
+  zk = M \ rk;
+  dk = -zk;
 
-  Ct = chol(M);
-  C = Ct';
-  C_inv = inv(C);
-  Ct_inv = inv(Ct);
+  for k = [ 0 : max_iterations ]
+    if (norm(rk) < tolerance)
+      % Success.
+      return;
+    end
 
-  Q_bar = C_inv * Q * Ct_inv;
-  b_bar = C_inv * b;
+    % Unfortunately, since we don't know the matrix ``C``, it isn't
+    % easy to compute alpha_k with an existing step size function.
+    alpha_k = (rk' * zk)/(dk' * A * dk);
+    x_next = x + alpha_k*dk;
+    r_next = rk + alpha_k*A*dk;
+    z_next = M \ r_next;
+    beta_next = (r_next' * z_next)/(rk' * zk);
+    d_next = -z_next + beta_next*dk;
 
-  % The solution to Q_bar*x_bar == b_bar is x_bar = Ct*x.
-  [x_bar, k] = conjugate_gradient_method(Q_bar, b_bar, x0, tolerance, max_iterations);
-  x = Ct_inv * x_bar;
+    k = k + 1;
+    x = x_next;
+    rk = r_next;
+    zk = z_next;
+    dk = d_next;
+  end
 end
-- 
2.44.2
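
For anyone exercising the restored routine, a minimal Octave driver
might look like the sketch below. The 3x3 system, the Jacobi
preconditioner, and the tolerance are hypothetical test data invented
for illustration; only the function name and argument order come from
the patch above.

  % Hypothetical test data: a small symmetric positive-definite system.
  A = [4, 1, 0;
       1, 3, 1;
       0, 1, 2];
  b = [1; 2; 3];

  % Jacobi (diagonal) preconditioner. Here M = CC^T for C = sqrt(M),
  % but, as the docstring notes, C itself is never needed.
  M = diag(diag(A));

  x0 = zeros(3, 1);
  [x, k] = preconditioned_conjugate_gradient_method(A, M, b, x0, ...
                                                    1e-10, 100);

  disp(norm(A*x - b)) % Should print a value below the tolerance.

Any symmetric positive-definite M will do; the Jacobi choice is simply
the cheapest nontrivial preconditioner for a quick check, and passing
M = eye(3) instead would exercise the unpreconditioned fallback path.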