%
% The initial gradient at x_{0} is not supplied, so we compute it
- % here and begin the loop at k=1.
- x = x0;
- g_k = g(x);
+ % here and begin the loop at k=0.
+ k = 0;
+ xk = x0;
+ gk = g(xk);
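+ % Invariant: gk always holds the gradient of f at the current iterate xk.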
- if (norm(g_k) < tolerance)
- % If x_0 is close enough to a solution, there's nothing for us to
- % do! We use g_k (the gradient of f at x_k) instead of d_k because
- % their 2-norms will be the same, and g_k is already stored.
- return;
- end
-
- for k = [1 : max_iterations]
+ while (k <= max_iterations)
% Loop until either of our stopping conditions is met. If the
% loop finishes, we have implicitly met the second stopping
% condition (number of iterations).
- d_k = -g_k;
- alpha_k = step_size(x);
- x = x + (alpha_k * d_k);
- g_k = g(x);
- if (norm(g_k) < tolerance)
+ if (norm(gk) < tolerance)
+ % This catches the k=0 case, too.
+ x = xk;
return;
end
+
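+ % Take a steepest-descent step: dk = -gk is the direction of steepest
+ % descent at xk, and step_size supplies the step length alpha_k.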
+ dk = -gk;
+ alpha_k = step_size(xk);
+ xk = xk + (alpha_k * dk);
+ gk = g(xk);
+
+ % We potentially just performed one more iteration than necessary
+ % in order to simplify the loop. Note that due to the structure of
+ % our loop, we will have k > max_iterations when we fail to
+ % converge.
+ k = k + 1;
end
- % If we make it to the end of the loop, that means we've executed the
- % maximum allowed iterations. The caller should be able to examine the
- % return value ``k`` to determine what happened.
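+ % We failed to converge within the allotted iterations, so return the
+ % last iterate. The caller can detect this case by checking whether
+ % k > max_iterations.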
+ x = xk;
end