From 84b8fb9002d091f84d0205e923c3989d0138ec9e Mon Sep 17 00:00:00 2001
From: Michael Orlitzky
Date: Wed, 20 Mar 2013 01:16:40 -0400
Subject: [PATCH 1/1] Fix up preconditioned CGM code.

Rename 'A' to 'Q' in the preconditioned CGM code.
Test the preconditioned CGM against the simple implementation.
---
 ...preconditioned_conjugate_gradient_method.m | 52 ++++++++++---------
 ...ditioned_conjugate_gradient_method_tests.m | 17 ++++-
 2 files changed, 41 insertions(+), 28 deletions(-)

diff --git a/optimization/preconditioned_conjugate_gradient_method.m b/optimization/preconditioned_conjugate_gradient_method.m
index 1442194..6394348 100644
--- a/optimization/preconditioned_conjugate_gradient_method.m
+++ b/optimization/preconditioned_conjugate_gradient_method.m
@@ -1,4 +1,4 @@
-function [x, k] = preconditioned_conjugate_gradient_method(A,
+function [x, k] = preconditioned_conjugate_gradient_method(Q,
                                                            M,
                                                            b,
                                                            x0,
@@ -7,11 +7,11 @@ function [x, k] = preconditioned_conjugate_gradient_method(A,
   %
   % Solve,
   %
-  %   Ax = b
+  %   Qx = b
   %
   % or equivalently,
   %
-  %   min [phi(x) = (1/2)*<Ax,x> - <b,x>]
+  %   min [phi(x) = (1/2)*<Qx,x> - <b,x>]
   %
   % using the preconditioned conjugate gradient method (14.56 in
   % Guler). If ``M`` is the identity matrix, we use the slightly
@@ -19,11 +19,11 @@ function [x, k] = preconditioned_conjugate_gradient_method(A,
   %
   % INPUT:
   %
-  %   - ``A`` -- The coefficient matrix of the system to solve. Must
+  %   - ``Q`` -- The coefficient matrix of the system to solve. Must
   %     be positive definite.
   %
   %   - ``M`` -- The preconditioning matrix. If the actual matrix used
-  %     to precondition ``A`` is called ``C``, i.e. ``C^(-1) * Q *
+  %     to precondition ``Q`` is called ``C``, i.e. ``C^(-1) * Q *
   %     C^(-T) == \bar{Q}``, then M=CC^T. However the matrix ``C`` is
   %     never itself needed. This is explained in Guler, section 14.9.
   %
@@ -31,7 +31,7 @@ function [x, k] = preconditioned_conjugate_gradient_method(A,
   %
   %   - ``x0`` -- The starting point for the search.
   %
-  %   - ``tolerance`` -- How close ``Ax`` has to be to ``b`` (in
+  %   - ``tolerance`` -- How close ``Qx`` has to be to ``b`` (in
   %     magnitude) before we stop.
   %
   %   - ``max_iterations`` -- The maximum number of iterations to
@@ -39,7 +39,7 @@ function [x, k] = preconditioned_conjugate_gradient_method(A,
   %
   % OUTPUT:
   %
-  %   - ``x`` - The solution to Ax=b.
+  %   - ``x`` - The solution to Qx=b.
   %
   %   - ``k`` - The ending value of k; that is, the number of
   %     iterations that were performed.
@@ -57,40 +57,42 @@ function [x, k] = preconditioned_conjugate_gradient_method(A,
   %   1. Guler, Osman. Foundations of Optimization. New York, Springer,
   %      2010.
   %
 
-  n = length(x0);
-
-  if (isequal(M, eye(n)))
-    [x, k] = conjugate_gradient_method(A, b, x0, tolerance, max_iterations);
-    return;
-  end
-
-  zero_vector = zeros(n, 1);
+  % Set k=0 first, so that the references to xk,rk,zk,dk which
+  % immediately follow correspond to x0,r0,z0,d0 respectively.
   k = 0;
-  x = x0; % Eschew the 'k' suffix on 'x' for simplicity.
-  rk = A*x - b; % The first residual must be computed the hard way.
+
+  xk = x0;
+  rk = Q*xk - b;
   zk = M \ rk;
   dk = -zk;
 
   for k = [ 0 : max_iterations ]
     if (norm(rk) < tolerance)
-      % Success.
+      x = xk;
       return;
     end
 
-    % Unfortunately, since we don't know the matrix ``C``, it isn't
-    % easy to compute alpha_k with an existing step size function.
-    alpha_k = (rk' * zk)/(dk' * A * dk);
-    x_next = x + alpha_k*dk;
-    r_next = rk + alpha_k*A*dk;
+    % Used twice; avoid recomputing it.
+    rkzk = rk' * zk;
+
+    % The term alpha_k*dk appears twice, but so does Q*dk. We can't
+    % precompute both, so we precompute the more expensive of the two.
+    Qdk = Q * dk;
+
+    alpha_k = rkzk/(dk' * Qdk);
+    x_next = xk + (alpha_k * dk);
+    r_next = rk + (alpha_k * Qdk);
     z_next = M \ r_next;
-    beta_next = (r_next' * z_next)/(rk' * zk);
+    beta_next = (r_next' * z_next)/rkzk;
     d_next = -z_next + beta_next*dk;
 
     k = k + 1;
-    x = x_next;
+    xk = x_next;
     rk = r_next;
     zk = z_next;
     dk = d_next;
   end
+
+  x = xk;
 end
diff --git a/tests/preconditioned_conjugate_gradient_method_tests.m b/tests/preconditioned_conjugate_gradient_method_tests.m
index c58eb55..d44ee42 100644
--- a/tests/preconditioned_conjugate_gradient_method_tests.m
+++ b/tests/preconditioned_conjugate_gradient_method_tests.m
@@ -3,12 +3,9 @@ A = [5,1,2; ...
      2,3,7];
 M = eye(3);
-
 b = [1;2;3];
-
 x0 = [1;1;1];
 
-## Solved over the rationals.
 cgm = conjugate_gradient_method(A, b, x0, 1e-6, 1000);
 pcgm = preconditioned_conjugate_gradient_method(A, M, b, x0, 1e-6, 1000);
 diff = norm(cgm - pcgm);
@@ -17,6 +14,12 @@ unit_test_equals("PCGM agrees with CGM when M == I", ...
                  true, ...
                  norm(diff) < 1e-6);
 
+pcgm_simple = simple_preconditioned_cgm(A, M, b, x0, 1e-6, 1000);
+diff = norm(pcgm_simple - pcgm);
+
+unit_test_equals("PCGM agrees with Simple PCGM when M == I", ...
+                 true, ...
+                 norm(diff) < 1e-6);
 
 ## Needs to be symmetric!
 M = [0.97466, 0.24345, 0.54850; ...
      0.24345, 0.73251, 0.07513; ...
      0.54850, 0.07513, 1.27946];
 
 pcgm = preconditioned_conjugate_gradient_method(A, M, b, x0, 1e-6, 1000);
 diff = norm(cgm - pcgm);
@@ -29,3 +32,11 @@ diff = norm(cgm - pcgm);
 unit_test_equals("PCGM agrees with CGM when M != I", ...
                  true, ...
                  norm(diff) < 1e-6);
+
+
+pcgm_simple = simple_preconditioned_cgm(A, M, b, x0, 1e-6, 1000);
+diff = norm(pcgm_simple - pcgm);
+
+unit_test_equals("PCGM agrees with Simple PCGM when M != I", ...
+                 true, ...
+                 norm(diff) < 1e-6);
-- 
2.43.2
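Note on the tests: the new assertions call simple_preconditioned_cgm(),
which this patch does not include. Only the name and call signature come
from the tests above; the body below is a minimal sketch of what such a
"simple" reference implementation might look like, assuming it takes
M = C*C^T literally, recovers a factor C via Octave's chol(), forms the
preconditioned system explicitly, and hands it to the existing
conjugate_gradient_method():

  function [x, k] = simple_preconditioned_cgm(Q, M, b, x0, ...
                                              tolerance, max_iterations)
    % Hypothetical reference implementation; not part of this patch.
    % chol(M) returns an upper-triangular R with M == R'*R, so C = R'
    % is a lower-triangular factor satisfying M == C*C'.
    R = chol(M);

    % Form \bar{Q} = C^(-1) * Q * C^(-T) and \bar{b} = C^(-1) * b.
    Q_bar = (R' \ Q) / R;
    b_bar = R' \ b;

    % The change of variables is y = C'*x = R*x, so map the starting
    % point forward and solve the preconditioned system with plain CGM...
    y0 = R * x0;
    [y, k] = conjugate_gradient_method(Q_bar, b_bar, y0, ...
                                       tolerance, max_iterations);

    % ...then map the solution back: x = C^(-T)*y.
    x = R \ y;
  end

Since the two routines stop on residuals of different systems (Q*x - b
in the patched code versus \bar{Q}*y - \bar{b} in the sketch), the tests
can only expect agreement up to the shared 1e-6 tolerance, which is
exactly what the norm(diff) < 1e-6 assertions check.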