% optimization/conjugate_gradient_method.m
function [x, k] = conjugate_gradient_method(A, b, x0, tolerance, max_iterations)
  %
  % Solve,
  %
  %   Ax = b
  %
  % or equivalently,
  %
  %   min [phi(x) = (1/2)*<Ax,x> - <b,x>]
  %
  % using the conjugate gradient method. (Note the minus sign: for a
  % positive-definite ``A``, the minimizer of phi satisfies Ax = b;
  % with a plus sign it would instead satisfy Ax = -b.)
  %
  % INPUT:
  %
  %   - ``A`` -- The coefficient matrix of the system to solve. Must
  %     be positive definite.
  %
  %   - ``b`` -- The right-hand-side of the system to solve.
  %
  %   - ``x0`` -- The starting point for the search.
  %
  %   - ``tolerance`` -- How close ``Ax`` has to be to ``b`` (in
  %     magnitude) before we stop.
  %
  %   - ``max_iterations`` -- The maximum number of iterations to
  %     perform.
  %
  % OUTPUT:
  %
  %   - ``x`` - The solution to Ax=b.
  %
  %   - ``k`` - The ending value of k; that is, the number of
  %     iterations that were performed.
  %
  % NOTES:
  %
  % All vectors are assumed to be *column* vectors.
  %
  n = length(x0);

  % The standard CGM is equivalent to the preconditioned CGM if you
  % use the identity matrix as your preconditioner.
  M = eye(n);

  % The explicit ``...`` continuations keep this call valid in MATLAB
  % as well as Octave (Octave accepts bare newlines inside parentheses,
  % MATLAB does not).
  [x, k] = preconditioned_conjugate_gradient_method(A, ...
                                                    M, ...
                                                    b, ...
                                                    x0, ...
                                                    tolerance, ...
                                                    max_iterations);
end