optimization/vanilla_cgm.m
function [x, k] = vanilla_cgm(A, b, x0, tolerance, max_iterations)
%
% Solve,
%
%   Ax = b
%
% or equivalently,
%
%   min [phi(x) = (1/2)*<Ax,x> - <b,x>]
%
% using the conjugate gradient method (Algorithm 5.2 in Nocedal and
% Wright). The two problems are equivalent because grad(phi)(x) =
% Ax - b, so the gradient of phi vanishes exactly at the solution of
% the linear system.
%
% INPUT:
%
%   - ``A`` -- The coefficient matrix of the system to solve. Must
%     be symmetric positive definite.
%
%   - ``b`` -- The right-hand side of the system to solve.
%
%   - ``x0`` -- The starting point for the search.
%
%   - ``tolerance`` -- How close ``Ax`` has to be to ``b`` (in
%     magnitude) before we stop.
%
%   - ``max_iterations`` -- The maximum number of iterations to
%     perform.
%
% OUTPUT:
%
%   - ``x`` -- The computed solution to Ax = b.
%
%   - ``k`` -- The ending value of k; that is, the number of
%     iterations that were performed.
%
% NOTES:
%
% All vectors are assumed to be *column* vectors.
%
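% EXAMPLE:
%
% An illustrative sketch, not taken from the original source; the
% system below is hypothetical, and it assumes step_length_cgm.m is
% on the Octave load path:
%
%   A = [4, 1; 1, 3];
%   b = [1; 2];
%   x0 = [2; 1];
%   [x, k] = vanilla_cgm(A, b, x0, 1e-10, 100);
%
% Here x should approximate A \ b = [1/11; 7/11]; since A is a 2x2
% symmetric positive-definite matrix, exact arithmetic would converge
% in at most two iterations.
%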
  k = 0;
  x = x0; % Eschew the 'k' suffix on 'x' for simplicity.
  rk = A*x - b; % The first residual must be computed the hard way.
  pk = -rk;     % The first search direction is steepest descent.

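  % Iterate Algorithm 5.2 until the residual is small enough or we hit
  % max_iterations: compute the exact step length along pk, take the
  % step, update the residual, and construct the next search direction,
  % conjugate to the previous ones with respect to A.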
  while (norm(rk) >= tolerance && k < max_iterations)
    alpha_k = step_length_cgm(rk, A, pk);
    x_next = x + alpha_k*pk;
    r_next = rk + alpha_k*A*pk; % Equivalent to A*x_next - b.
    % beta is the ratio of successive squared residual norms.
    beta_next = (r_next' * r_next)/(rk' * rk);
    p_next = -r_next + beta_next*pk;

    k = k + 1;
    x = x_next;
    rk = r_next;
    pk = p_next;
  end
end
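% The helper step_length_cgm used above is defined elsewhere in this
% repository. A minimal sketch of what it computes, assuming the exact
% step length from Algorithm 5.2 of Nocedal and Wright (the actual
% implementation may differ):
%
%   function alpha = step_length_cgm(r, A, p)
%     % alpha minimizes phi(x + alpha*p) exactly along p.
%     alpha = (r' * r) / (p' * A * p);
%   end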