Add first implementation of the conjugate gradient method and its tests.
[octave.git] / optimization / conjugate_gradient_method.m
function x_star = conjugate_gradient_method(A, b, x0, tolerance)
  ##
  ## Solve,
  ##
  ##   Ax = b
  ##
  ## or equivalently,
  ##
  ##   min [phi(x) = (1/2)*<Ax,x> - <b,x>]
  ##
  ## using Algorithm 5.2 in Nocedal and Wright.
  ##
  ## INPUT:
  ##
  ##   - ``A`` -- The coefficient matrix of the system to solve. Must
  ##     be symmetric positive definite.
  ##
  ##   - ``b`` -- The right-hand side of the system to solve.
  ##
  ##   - ``x0`` -- The starting point for the search.
  ##
  ##   - ``tolerance`` -- How close ``Ax`` has to be to ``b`` (in
  ##     magnitude) before we stop.
  ##
  ## OUTPUT:
  ##
  ##   - ``x_star`` -- The solution to Ax=b.
  ##
  ## NOTES:
  ##
  ## All vectors are assumed to be *column* vectors.
  ##
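  ## EXAMPLE (an illustrative sketch, not taken from the repository's
  ## tests; assumes ``step_length_cgm`` is on the load path):
  ##
  ##   A = [5, 1; 1, 3];  # symmetric positive definite
  ##   b = [1; 2];
  ##   x0 = [0; 0];
  ##   x_star = conjugate_gradient_method(A, b, x0, 1e-10);
  ##   norm(A*x_star - b)  # no greater than the tolerance, 1e-10
  ##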
  k = 0;
  xk = x0;
  rk = A*xk - b;  # The first residual must be computed the hard way.
  pk = -rk;       # The first search direction is steepest descent.

  while (norm(rk) > tolerance)
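    # step_length_cgm() is defined elsewhere in this repository; it is
    # assumed to return the exact minimizer of phi along pk. Setting
    # d/d(alpha) phi(xk + alpha*pk) = 0 gives
    # alpha_k = -(rk'*pk)/(pk'*A*pk), which for conjugate gradient
    # directions equals (rk'*rk)/(pk'*A*pk).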
    alpha_k = step_length_cgm(rk, A, pk);
    x_next = xk + alpha_k*pk;

    # Cheap residual update: A*x_next - b expands to
    # A*(xk + alpha_k*pk) - b = rk + alpha_k*A*pk.
    r_next = rk + alpha_k*A*pk;

    beta_next = (r_next' * r_next)/(rk' * rk);
    p_next = -r_next + beta_next*pk;

    k = k + 1;
    xk = x_next;
    rk = r_next;
    pk = p_next;
  end

  x_star = xk;
end
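
## A minimal self-check sketch, assuming ``step_length_cgm`` implements
## the exact step length described above; it is illustrative and not a
## substitute for the tests mentioned in the commit message. On a small
## symmetric positive definite system, the result should agree with
## Octave's direct solve:

%!test
%! A = [4, 1; 1, 2];  # symmetric positive definite
%! b = [1; 1];
%! tol = 1e-10;
%! x = conjugate_gradient_method(A, b, [0; 0], tol);
%! assert(norm(A*x - b) <= tol);
%! assert(x, A \ b, 1e-8);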