function [x, k] = preconditioned_conjugate_gradient_method(Q,
                                                           M,
                                                           b,
                                                           x0,
                                                           tolerance,
                                                           max_iterations)
  %
  % Solve,
  %
  %   Qx = b
  %
  % or equivalently,
  %
  %   min [phi(x) = (1/2)*<Qx,x> - <b,x>]
  %
  % using the preconditioned conjugate gradient method (14.54 in
  % Guler). The two problems are equivalent because, for symmetric
  % positive-definite ``Q``, the gradient of phi is Qx - b, which
  % vanishes precisely at the solution of the linear system.
  %
  % INPUT:
  %
  % - ``Q`` -- The coefficient matrix of the system to solve. Must
  %   be positive definite.
  %
  % - ``M`` -- The preconditioning matrix. If ``C`` is the matrix
  %   actually used to precondition ``Q``, i.e. ``C^(-1) * Q *
  %   C^(-T) == \bar{Q}``, then ``M == C*C^T``. (See the EXAMPLE
  %   below for one common choice.)
  %
  % - ``b`` -- The right-hand side of the system to solve.
  %
  % - ``x0`` -- The starting point for the search.
  %
  % - ``tolerance`` -- How close ``Qx`` has to be to ``b`` (in
  %   magnitude) before we stop.
  %
  % - ``max_iterations`` -- The maximum number of iterations to
  %   perform.
  %
  % OUTPUT:
  %
  % - ``x`` -- The computed solution to Qx=b.
  %
  % - ``k`` -- The ending value of k; that is, the number of
  %   iterations that were performed.
  %
  % NOTES:
  %
  % All vectors are assumed to be *column* vectors.
  %
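  % EXAMPLE:
  %
  % A hypothetical invocation (it assumes only that the companion
  % conjugate_gradient_method.m is on the load path), using the
  % simple Jacobi preconditioner ``M == diag(diag(Q))``:
  %
  %   Q = [5, 1; 1, 3];
  %   M = diag(diag(Q));
  %   b = [1; 2];
  %   x0 = [0; 0];
  %   [x, k] = preconditioned_conjugate_gradient_method(Q, M, b, x0, 1e-10, 100);
  %   norm(Q*x - b) % should not exceed 1e-10
  %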
  % REFERENCES:
  %
  % 1. Guler, Osman. Foundations of Optimization. New York: Springer,
  %    2010.
  %

  % Factor the preconditioner: chol() returns the upper-triangular
  % Ct with Ct'*Ct == M, so M == C*C^T for C == Ct'.
  Ct = chol(M);
  C = Ct';

  % Both factors are triangular, so inverting them is cheap.
  C_inv = inv(C);
  Ct_inv = inv(Ct);

  % Transform the system into the preconditioned coordinates.
  Q_bar = C_inv * Q * Ct_inv;
  b_bar = C_inv * b;

  % The solution to Q_bar*x_bar == b_bar is x_bar == Ct*x, so the
  % starting point x0 must be transformed the same way.
  x0_bar = Ct * x0;

  [x_bar, k] = conjugate_gradient_method(Q_bar, b_bar, x0_bar, tolerance, max_iterations);
  x = Ct_inv * x_bar;
end
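
% A minimal embedded test, as a sketch: it assumes the companion
% conjugate_gradient_method.m is available and stops once the
% residual is within the requested tolerance. It preconditions a
% small positive-definite system with the Jacobi preconditioner
% and checks the residual.
%!test
%! Q = [7, 2; 2, 4];
%! M = diag(diag(Q));
%! b = [1; 1];
%! [x, k] = preconditioned_conjugate_gradient_method(Q, M, b, [0; 0], 1e-10, 100);
%! assert(norm(Q*x - b) <= 1e-10);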