--- /dev/null
+## We can use the steepest descent method to solve Qx=b as in the
+## conjugate gradient method. Like we did there, we test that the
+## steepest descent method agrees with Octave's PCGM and our CGM.
+##
+## Note: The steepest descent method uses the infinity norm as its
+## stopping condition, so we should use it here too.
+##
+max_iterations = 100000;
+tolerance = 1e-11;
+
+## First a simple example: a small hand-picked symmetric
+## positive-definite system, so both methods must converge.
+Q = [5,1,2; ...
+     1,6,3; ...
+     2,3,7];
+
+b = [1;2;3];
+x0 = [1;1;1];
+
+## Minimizing q(x) = (1/2)x'*Q*x - b'*x is equivalent to solving
+## Qx = b; the gradient of q at x is Qx - b.
+g = @(x) Q*x - b;
+
+% The step size algorithm to use in the steepest descent method:
+% exact line search for a positive-definite quadratic.
+step_size = @(x) step_length_positive_definite(g(x), Q, -g(x));
+
+x_cgm = conjugate_gradient_method(Q, b, x0, tolerance, max_iterations);
+x_sd = steepest_descent(g, x0, step_size, tolerance, max_iterations);
+
+## Both solvers stop within `tolerance` of the true solution, so they
+## can differ from each other by at most twice that.
+discrepancy = norm(x_cgm - x_sd, 'inf');
+unit_test_equals("Steepest descent agrees with CGM", ...
+                 true, ...
+                 discrepancy < 2*tolerance);
+
+
+## Test against Octave's pcg() function on random positive-definite
+## systems of increasing size. (The loop previously hard-coded the
+## dimension to 5, so the larger sizes were never actually tested.)
+for n = [ 5, 10, 25, 50, 100 ]
+  Q = random_positive_definite_matrix(n, 1000);
+  C = random_positive_definite_matrix(n, 1000);
+
+  ## Octave's pcg() implementation assumes the zero vector as the
+  ## initial guess, so steepest descent must start there as well.
+  x0 = zeros(n, 1);
+  b = unifrnd(-1000, 1000, n, 1);
+
+  ## The gradient of q(x) = (1/2)x'*Q*x - b'*x at x; minimizing q
+  ## solves Qx = b.
+  g = @(x) Q*x - b;
+
+  % The step size algorithm to use in the steepest descent method:
+  % exact line search for a positive-definite quadratic.
+  step_size = @(x) step_length_positive_definite(g(x), Q, -g(x));
+
+  [x_pcg, o_flag, o_relres, o_iter] = pcg(Q, ...
+                                          b, ...
+                                          tolerance, ...
+                                          max_iterations, ...
+                                          C, ...
+                                          C');
+  x_sd = steepest_descent(g, x0, step_size, tolerance, max_iterations);
+
+  ## Each solver stops within `tolerance`, so allow twice that slack.
+  discrepancy = norm(x_pcg - x_sd, 'inf');
+  msg = sprintf("Our steepest descent agrees with Octave's pcg, n=%d.", n);
+  unit_test_equals(msg, true, discrepancy < 2*tolerance);
+end