+## Used throughout. The PCGM uses the infinity norm as its stopping
+## condition, so we had better use it as well.
+max_iterations = 10000;
+tolerance = 1e-10;
+
+## First a simple example.
A = [5,1,2; ...
1,6,3; ...
2,3,7];
b = [1;2;3];
x0 = [1;1;1];
-cgm = conjugate_gradient_method(A, b, x0, 1e-6, 1000);
-pcgm = preconditioned_conjugate_gradient_method(A, M, b, x0, 1e-6, 1000);
-diff = norm(cgm - pcgm);
+cgm = conjugate_gradient_method(A, b, x0, tolerance, max_iterations);
+pcgm = preconditioned_conjugate_gradient_method(A, ...
+ M, ...
+ b, ...
+ x0, ...
+ tolerance, ...
+ max_iterations);
+diff = norm(cgm - pcgm, 'inf');
unit_test_equals("PCGM agrees with CGM when M == I", ...
true, ...
- norm(diff) < 1e-6);
+ diff < 2*tolerance);
-pcgm_simple = simple_preconditioned_cgm(A, M, b, x0, 1e-6, 1000);
-diff = norm(pcgm_simple - pcgm);
+pcgm_simple = simple_preconditioned_cgm(A, M, b, x0, tolerance, max_iterations);
+diff = norm(pcgm_simple - pcgm, 'inf');
unit_test_equals("PCGM agrees with SimplePCGM when M == I", ...
true, ...
- norm(diff) < 1e-6);
+ diff < 2*tolerance);
## Needs to be symmetric!
M = [0.97466, 0.24345, 0.54850; ...
0.24345, 0.73251, 0.76639; ...
0.54850, 0.76639, 1.47581];
-pcgm = preconditioned_conjugate_gradient_method(A, M, b, x0, 1e-6, 1000);
-diff = norm(cgm - pcgm);
+pcgm = preconditioned_conjugate_gradient_method(A, ...
+ M, ...
+ b, ...
+ x0, ...
+ tolerance, ...
+ max_iterations);
+diff = norm(cgm - pcgm, 'inf');
unit_test_equals("PCGM agrees with CGM when M != I", ...
true, ...
- norm(diff) < 1e-6);
+ diff < 2*tolerance);
-pcgm_simple = simple_preconditioned_cgm(A, M, b, x0, 1e-6, 1000);
-diff = norm(pcgm_simple - pcgm);
+pcgm_simple = simple_preconditioned_cgm(A, M, b, x0, tolerance, max_iterations);
+diff = norm(pcgm_simple - pcgm, 'inf');
unit_test_equals("PCGM agrees with Simple PCGM when M != I", ...
true, ...
- norm(diff) < 1e-6);
+ diff < 2*tolerance);
# Test against Octave's pcg() function.
-max_iterations = 100000;
-tolerance = 1e-11;
-C = random_positive_definite_matrix(5, 1000);
-M = C*C';
-
for n = [ 5, 10, 25, 50, 100 ]
- A = random_positive_definite_matrix(5, 1000);
+ A = random_positive_definite_matrix(n, 100);
+
+  # Use the Cholesky factorization as a preconditioner.
+ Ct = perturb(chol(A));
+ C = Ct';
+ M = Ct*C;
# Assumed by Octave's implementation when you don't supply a
# preconditioner.
- x0 = zeros(5, 1);
- b = unifrnd(-1000, 1000, 5, 1);
- [o_x, o_flag, o_relres, o_iter] = pcg(A, b, tolerance, max_iterations, C, C');
+ x0 = zeros(n, 1);
+ b = unifrnd(-100, 100, n, 1);
+ g = @(x) A*x - b;
+
+ ## pcg() stops when the /relative/ norm falls below tolerance. To
+ ## eliminate the relativity, we divide the tolerance by the
+ ## quantity that pcg() will divide by.
+ [o_x, o_flag, o_relres, o_iter] = pcg(A, b, tolerance/norm(g(x0)), ...
+ max_iterations, C, C');
[x, k] = preconditioned_conjugate_gradient_method(A,
M,
- b,
- x0,
- tolerance,
- max_iterations);
- diff = norm(o_x - x);
+ b,
+ x0,
+ tolerance,
+ max_iterations);
+
+ diff = norm(o_x - x, 'inf');
msg = sprintf("Our PCGM agrees with Octave's, n=%d.", n);
- unit_test_equals(msg, true, norm(diff) < 1e-10);
+    ## There's no good way to choose the tolerance here, since each
+    ## individual algorithm terminates based on the (2,infinity)-norm of
+    ## the gradient. So we settle for the square root of the tolerance.
+ unit_test_equals(msg, true, diff <= sqrt(tolerance));
end