From d2a898d4e9937c00b1a81e35c0492a8ed80c8950 Mon Sep 17 00:00:00 2001
From: Michael Orlitzky
Date: Fri, 22 Mar 2013 15:48:26 -0400
Subject: [PATCH] Simplify the loop in steepest_descent().

 optimization/steepest_descent.m | 37 +++++++++++++++++++------------------
 1 file changed, 19 insertions(+), 18 deletions(-)
diff --git a/optimization/steepest_descent.m b/optimization/steepest_descent.m
index c5bf0bc..1eea7bc 100644
--- a/optimization/steepest_descent.m
+++ b/optimization/steepest_descent.m
@@ -43,32 +43,33 @@ function [x, k] = steepest_descent(g, x0, step_size, tolerance, max_iterations)
%
% The initial gradient at x_{0} is not supplied, so we compute it
- % here and begin the loop at k=1.
- x = x0;
- g_k = g(x);
+ % here and begin the loop at k=0.
+ k = 0;
+ xk = x0;
+ gk = g(xk);
- if (norm(g_k) < tolerance)
- % If x_0 is close enough to a solution, there's nothing for us to
- % do! We use g_k (the gradient of f at x_k) instead of d_k because
- % their 2-norms will be the same, and g_k is already stored.
- return;
- end
-
- for k = [1 : max_iterations]
+ while (k <= max_iterations)
% Loop until either of our stopping conditions are met. If the
% loop finishes, we have implicitly met the second stopping
% condition (number of iterations).
- d_k = -g_k;
- alpha_k = step_size(x);
- x = x + (alpha_k * d_k);
- g_k = g(x);
if (norm(g_k) < tolerance)
+ # This catches the k=0 case, too.
+ x = xk;
return;
end
+
+ dk = -gk;
+ alpha_k = step_size(xk);
+ xk = xk + (alpha_k * dk);
+ gk = g(xk);
+
+ % We potentially just performed one more iteration than necessary
+ % in order to simplify the loop. Note that due to the structure of
+ % our loop, we will have k > max_iterations when we fail to
+ % converge.
+ k = k + 1;
end
- % If we make it to the end of the loop, that means we've executed the
- % maximum allowed iterations. The caller should be able to examine the
- % return value ``k`` to determine what happened.
+ x = xk;
end
-- 
2.33.1