optimization/vanilla_cgm.m
function [x, k] = vanilla_cgm(A, b, x0, tolerance, max_iterations)
%
% Solve,
%
%   Ax = b
%
% or equivalently,
%
%   min [phi(x) = (1/2)*<Ax,x> - <b,x>]
%
% using the conjugate gradient method (Algorithm 5.2 in Nocedal and
% Wright).
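%
% The two formulations agree because, for symmetric positive definite
% A, the gradient of the quadratic is
%
%   grad phi(x) = Ax - b,
%
% so the residual computed below is exactly the gradient of phi, and
% driving it to zero solves both problems.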
%
% INPUT:
%
%   - ``A`` -- The coefficient matrix of the system to solve. Must
%     be positive definite.
%
%   - ``b`` -- The right-hand side of the system to solve.
%
%   - ``x0`` -- The starting point for the search.
%
%   - ``tolerance`` -- How close ``Ax`` has to be to ``b`` (in the
%     infinity norm) before we stop.
%
%   - ``max_iterations`` -- The maximum number of iterations to perform.
%
% OUTPUT:
%
%   - ``x`` -- The solution to Ax=b.
%
%   - ``k`` -- The ending value of k; that is, the number of iterations
%     that were performed.
%
% NOTES:
%
%   All vectors are assumed to be *column* vectors.
%
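% EXAMPLE:
%
%   An illustrative call on a small system (the data, tolerance, and
%   iteration limit below are arbitrary; the helper
%   step_length_positive_definite() must be on the load path):
%
%     A  = [2, 1; 1, 2];   % symmetric positive definite
%     b  = [1; 1];
%     x0 = [0; 0];
%     [x, k] = vanilla_cgm(A, b, x0, 1e-10, 100);
%
%   The result x should agree with A \ b = [1/3; 1/3] to within the
%   given tolerance.
%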

  sqrt_n = floor(sqrt(length(x0))); % residual-refresh interval (see the loop)

  k = 0;
  xk = x0;
  rk = A*xk - b; % The first residual must be computed the hard way.
  pk = -rk;      % The first search direction is the steepest-descent one.

  while (k < max_iterations && norm(rk, 'inf') > tolerance)
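    % Exact line search along pk: step_length_positive_definite() is
    % assumed to return the minimizer of phi along pk, namely
    % alpha_k = -(rk'*pk)/(pk'*A*pk), which for conjugate gradient
    % directions equals the (rk'*rk)/(pk'*A*pk) of Algorithm 5.2.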
    alpha_k = step_length_positive_definite(rk, A, pk);
    x_next = xk + alpha_k*pk;

    % Update the residual. The cheap recursive update follows from
    % A*x_next - b = A*(xk + alpha_k*pk) - b = rk + alpha_k*A*pk, but it
    % accumulates roundoff error, so every sqrt_n iterations we recompute
    % the residual directly from its definition instead.
    if (mod(k, sqrt_n) == 0)
      r_next = A*x_next - b;
    else
      r_next = rk + (alpha_k * A * pk);
    end

    % Standard CG update: beta_next makes the new direction A-conjugate
    % to pk (Algorithm 5.2).
    beta_next = (r_next' * r_next)/(rk' * rk);
    p_next = -r_next + beta_next*pk;

    k = k + 1;
    xk = x_next;
    rk = r_next;
    pk = p_next;
  end

  x = xk;
end