## stopping condition, so we should too.
##
max_iterations = 100000;
-tolerance = 1e-10;
+tolerance = 1e-8;
## First a simple example.
Q = [5,1,2; ...
g = @(x) Q*x - b; % The gradient of q at x.
% The step size algorithm to use in the steepest descent method.
-step_size = @(x) step_length_positive_definite(g(x), Q);
+step_size = @(x) step_length_positive_definite(g(x), Q, -g(x));
sd = steepest_descent(g, x0, step_size, tolerance, max_iterations);
diff = norm(cgm - sd, 'inf');
g = @(x) Q*x - b; % The gradient of q at x.
% The step size algorithm to use in the steepest descent method.
- step_size = @(x) step_length_positive_definite(g(x), Q);
+ step_size = @(x) step_length_positive_definite(g(x), Q, -g(x));
## pcg() stops when the /relative/ norm falls below tolerance. To
## eliminate the relativity, we divide the tolerance by the
max_iterations, ...
C, ...
C');
- x_sd = steepest_descent(g, x0, step_size, tolerance, max_iterations);
+ [x_sd, k] = steepest_descent(g, x0, step_size, tolerance, max_iterations);
- ## Note: pcg() uses the 2-norm.
- diff = abs(norm(g(x_pcg)) - norm(g(x_sd), 'inf'));
+ diff = norm(x_pcg - x_sd, 'inf');
msg = sprintf("Our steepest descent agrees with Octave's pcg, n=%d.", n);
- unit_test_equals(msg, true, diff <= tolerance);
+
+ ## There's no good way to choose the tolerance here, since each
+ ## algorithm terminates based on a different norm of the gradient
+ ## (pcg uses the 2-norm, while steepest descent uses the infinity-norm).
+ unit_test_equals(msg, true, diff <= sqrt(tolerance));
end