+## Used throughout. The CGM uses the infinity norm as its stopping
+## condition, so we should use the same norm when checking results.
+max_iterations = 10000;
+tolerance = 1e-10;
+
A = [5,1,2; ...
1,6,3;
2,3,7];
## Solved over the rationals.
expected = [2/73; 11/73; 26/73];
-actual = conjugate_gradient_method(A, b, x0, 1e-6, 1000);
-diff = norm(actual - expected);
+actual = conjugate_gradient_method(A, b, x0, tolerance, max_iterations);
+diff = norm(actual - expected, 'inf');
unit_test_equals("CGM works on an example", ...
true, ...
- norm(diff) < 1e-6);
+ diff < tolerance);
# Let's test Octave's pcg() against our method on some easy matrices.
-max_iterations = 100000;
-tolerance = 1e-11;
for n = [ 5, 10, 25, 50, 100 ]
- A = random_positive_definite_matrix(5, 1000);
+ A = random_positive_definite_matrix(n, 100);
# Assumed by Octave's implementation when you don't supply a
# preconditioner.
- x0 = zeros(5, 1);
- b = unifrnd(-1000, 1000, 5, 1);
- [o_x, o_flag, o_relres, o_iter] = pcg(A, b, tolerance, max_iterations);
- [x, k] = conjugate_gradient_method(A, b, x0, tolerance, max_iterations);
-
- diff = norm(o_x - x);
+ x0 = zeros(n, 1);
+ b = unifrnd(-100, 100, n, 1);
+ g = @(x) A*x - b;
+
+ ## pcg() stops when the /relative/ norm falls below tolerance. To
+ ## eliminate the relativity, we divide the tolerance by the
+ ## quantity that pcg() will divide by.
+ [o_x, o_flag, o_relres, o_iter] = pcg(A, b, tolerance/norm(g(x0)), ...
+ max_iterations);
+ [x, k] = conjugate_gradient_method(A, b, x0, tolerance, ...
+ max_iterations);
+
+ diff = norm(o_x - x, 'inf');
msg = sprintf("Our CGM agrees with Octave's, n=%d.", n);
- unit_test_equals(msg, true, norm(diff) < 1e-10);
+
+  ## There's no principled way to choose a comparison tolerance here,
+  ## since each algorithm terminates based on the norm of its own
+  ## residual, not on the distance between the two solutions. So we
+  ## loosen the comparison to sqrt(tolerance).
+ unit_test_equals(msg, true, diff <= sqrt(tolerance));
end