--- /dev/null
+function [x, k] = preconditioned_conjugate_gradient_method(A, ...
+                                                           M, ...
+                                                           b, ...
+                                                           x0, ...
+                                                           tolerance, ...
+                                                           max_iterations)
+ %
+ % Solve,
+ %
+ % Ax = b
+ %
+ % or equivalently,
+ %
+   %    min [phi(x) = (1/2)*<Ax,x> - <b,x>]
+ %
+ % using the preconditioned conjugate gradient method (14.56 in
+ % Guler). If ``M`` is the identity matrix, we use the slightly
+ % faster implementation in conjugate_gradient_method.m.
+ %
+ % INPUT:
+ %
+ % - ``A`` -- The coefficient matrix of the system to solve. Must
+ % be positive definite.
+ %
+   % - ``M`` -- The preconditioning matrix. If the actual matrix used
+   %   to precondition ``A`` is called ``C``, i.e. ``C^(-1) * A *
+   %   C^(-T) == \bar{A}``, then M = C*C^T. However, the matrix ``C``
+   %   is never itself needed. This is explained in Guler, section
+   %   14.9.
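+   %
+   %   For example (illustrative only): if ``C`` is the
+   %   lower-triangular Cholesky factor of ``M``, then M == C*C^T by
+   %   construction, and the solve z = M \ r performed in the loop
+   %   below applies C^(-T) * C^(-1) to the residual without ever
+   %   forming ``C``.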
+ %
+ % - ``b`` -- The right-hand-side of the system to solve.
+ %
+ % - ``x0`` -- The starting point for the search.
+ %
+ % - ``tolerance`` -- How close ``Ax`` has to be to ``b`` (in
+ % magnitude) before we stop.
+ %
+ % - ``max_iterations`` -- The maximum number of iterations to
+ % perform.
+ %
+ % OUTPUT:
+ %
+   % - ``x`` -- The solution to Ax=b.
+   %
+   % - ``k`` -- The ending value of k; that is, the number of
+   %   iterations that were performed.
+ %
+ % NOTES:
+ %
+ % All vectors are assumed to be *column* vectors.
+ %
+ % The cited algorithm contains a typo; in "The Preconditioned
+ % Conjugate-Gradient Method", we are supposed to define
+ % d_{0} = -z_{0}, not -r_{0} as written.
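+   %   Starting from -z_{0} rather than -r_{0} makes the first search
+   %   direction the steepest-descent direction of the preconditioned
+   %   system, which is what the alpha/beta recurrences assume.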
+ %
+ % REFERENCES:
+ %
+ % 1. Guler, Osman. Foundations of Optimization. New York, Springer,
+ % 2010.
+ %
+ n = length(x0);
+
+ if (isequal(M, eye(n)))
+ [x, k] = conjugate_gradient_method(A, b, x0, tolerance, max_iterations);
+ return;
+ end
+
+  k = 0;
+  x = x0;        % Eschew the 'k' suffix on 'x' for simplicity.
+  rk = A*x - b;  % The first residual must be computed the hard way.
+  zk = M \ rk;   % The preconditioned residual.
+  dk = -zk;      % Note: -z_{0}, not -r_{0}. See NOTES above.
+
+  % Perform at most ``max_iterations`` updates, stopping early once
+  % the residual is small enough. On return, ``k`` is the number of
+  % iterations (updates) that were performed.
+  while (norm(rk) >= tolerance && k < max_iterations)
+    % Unfortunately, since we don't know the matrix ``C``, it isn't
+    % easy to compute alpha_k with an existing step size function.
+    Adk = A * dk;  % Computed once, used twice below.
+    alpha_k = (rk' * zk)/(dk' * Adk);
+    x_next = x + alpha_k*dk;
+    r_next = rk + alpha_k*Adk;
+    z_next = M \ r_next;
+    beta_next = (r_next' * z_next)/(rk' * zk);
+    d_next = -z_next + beta_next*dk;
+
+    k = k + 1;
+    x = x_next;
+    rk = r_next;
+    zk = z_next;
+    dk = d_next;
+  end
+end
--- /dev/null
+A = [5,1,2; ...
+ 1,6,3; ...
+ 2,3,7];
+
+M = eye(3);
+
+b = [1;2;3];
+
+x0 = [1;1;1];
+
+## With M equal to the identity, PCGM should agree with plain CGM.
+cgm = conjugate_gradient_method(A, b, x0, 1e-6, 1000);
+pcgm = preconditioned_conjugate_gradient_method(A, M, b, x0, 1e-6, 1000);
+diff = norm(cgm - pcgm);
+
+unit_test_equals("PCGM agrees with CGM when M == I", ...
+ true, ...
+                 diff < 1e-6);
+
+
+## The preconditioner must be symmetric positive definite.
+M = [0.97466, 0.24345, 0.54850; ...
+ 0.24345, 0.73251, 0.76639; ...
+ 0.54850, 0.76639, 1.47581];
+
+pcgm = preconditioned_conjugate_gradient_method(A, M, b, x0, 1e-6, 1000);
+diff = norm(cgm - pcgm);
+
+unit_test_equals("PCGM agrees with CGM when M != I", ...
+ true, ...
+                 diff < 1e-6);
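+
+
+## As an extra sanity check, use a Jacobi (diagonal) preconditioner --
+## an illustrative choice, not prescribed by the reference. The result
+## should still satisfy Ax = b to within the tolerance.
+M = diag(diag(A));
+
+pcgm = preconditioned_conjugate_gradient_method(A, M, b, x0, 1e-6, 1000);
+
+unit_test_equals("PCGM with a Jacobi preconditioner solves Ax == b", ...
+                 true, ...
+                 norm(A*pcgm - b) < 1e-6);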