From b12c6c2a4bf4cef29b2e08b743c92889505c7ed9 Mon Sep 17 00:00:00 2001 From: Michael Orlitzky Date: Fri, 22 Mar 2013 13:39:31 -0400 Subject: [PATCH] Replace ##-style comments with %-style comments in all non-test code. --- advection_matrix.m | 176 +++++++++--------- advection_matrix_sparse.m | 18 +- c_inner_product.m | 40 ++-- c_norm.m | 34 ++-- central_difference.m | 64 +++---- diffusion_matrix_sparse.m | 16 +- divided_difference.m | 40 ++-- divided_difference_coefficients.m | 22 +-- envelope.m | 28 +-- fixed_point_method.m | 34 ++-- forward_euler.m | 76 ++++---- forward_euler1.m | 32 ++-- is_upper_triangular.m | 4 +- legendre_p.m | 28 +-- legendre_p_tilde.m | 34 ++-- misc/even.m | 22 +-- misc/odd.m | 24 +-- newtons_method.m | 46 ++--- optimization/step_length_cgm.m | 72 +++---- optimization/step_length_positive_definite.m | 66 +++---- .../test_functions/extended_powell1.m | 20 +- .../extended_powell_gradient1.m | 16 +- .../test_functions/extended_powell_hessian1.m | 16 +- .../test_functions/extended_rosenbrock1.m | 20 +- .../extended_rosenbrock_gradient1.m | 16 +- .../extended_rosenbrock_hessian1.m | 16 +- optimization/test_functions/himmelblau.m | 28 +-- optimization/test_functions/himmelblau1.m | 10 +- .../test_functions/himmelblau_gradient.m | 8 +- .../test_functions/himmelblau_gradient1.m | 10 +- .../test_functions/himmelblau_hessian.m | 8 +- .../test_functions/himmelblau_hessian1.m | 10 +- optimization/test_functions/powell.m | 14 +- optimization/test_functions/powell1.m | 8 +- optimization/test_functions/powell_gradient.m | 8 +- .../test_functions/powell_gradient1.m | 10 +- optimization/test_functions/powell_hessian.m | 8 +- optimization/test_functions/powell_hessian1.m | 10 +- optimization/test_functions/rosenbrock.m | 16 +- optimization/test_functions/rosenbrock1.m | 10 +- .../test_functions/rosenbrock_gradient.m | 8 +- .../test_functions/rosenbrock_gradient1.m | 10 +- .../test_functions/rosenbrock_hessian.m | 8 +- .../test_functions/rosenbrock_hessian1.m | 10 +- optimization/test_functions/trigonometric1.m | 14 +- .../test_functions/trigonometric_gradient1.m | 16 +- .../test_functions/trigonometric_hessian1.m | 10 +- optimization/test_functions/wood.m | 10 +- optimization/test_functions/wood1.m | 10 +- optimization/test_functions/wood_gradient.m | 8 +- optimization/test_functions/wood_gradient1.m | 10 +- optimization/test_functions/wood_hessian.m | 8 +- optimization/test_functions/wood_hessian1.m | 10 +- partition.m | 44 ++--- permutation_matrices.m | 26 +-- poisson_matrix.m | 84 ++++----- 56 files changed, 713 insertions(+), 711 deletions(-) diff --git a/advection_matrix.m b/advection_matrix.m index 5e76ee9..591d012 100644 --- a/advection_matrix.m +++ b/advection_matrix.m @@ -1,82 +1,82 @@ function S = advection_matrix(integerN, x0, xN) - ## - ## The numerical solution of the advection-diffusion equation, - ## - ## -d*u''(x) + v*u'(x) + r*u = f(x) - ## - ## in one dimension, subject to the boundary conditions, - ## - ## u(x0) = u(xN) - ## - ## u'(x0) = u'(xN) - ## - ## over the interval [x0,xN] gives rise to a linear system: - ## - ## AU = h^2*F - ## - ## where h = 1/n, and A is given by, - ## - ## A = d*K + v*h*S + r*h^2*I. - ## - ## We will call the matrix S the "advection matrix," and it will be - ## understood that the first row (corresponding to j=0) is to be - ## omitted; since we have assumed that when j=0, u(xj) = u(x0) = - ## u(xN) and likewise for u'. ignored (i.e, added later). 
- ## - ## INPUTS: - ## - ## * ``integerN`` - An integer representing the number of - ## subintervals we should use to approximate `u`. Must be greater - ## than or equal to 2, since we have at least two values for u(x0) - ## and u(xN). - ## - ## * ``f`` - The function on the right hand side of the poisson - ## equation. - ## - ## * ``x0`` - The initial point. - ## - ## * ``xN`` - The terminal point. - ## - ## OUTPUTS: - ## - ## * ``S`` - The NxN matrix of coefficients for the vector [u(x1), - ## ..., u(xN)]. - ## - ## EXAMPLES: - ## - ## For integerN=4, x0=0, and x1=1, we will have four subintervals: - ## - ## [0, 0.25], [0.25, 0.5], [0.5, 0.75], [0.75, 1] - ## - ## The first row of the matrix 'S' should compute the "derivative" - ## at x1=0.25. By the finite difference formula, this is, - ## - ## u'(x1) = (u(x2) - u(x0))/2 - ## - ## = (u(x2) - u(x4))/2 - ## - ## Therefore, the first row of 'S' should look like, - ## - ## 2*S1 = [0, 1, 0, -1] - ## - ## and of course we would have F1 = [0] on the right-hand side. - ## Likewise, the last row of S should correspond to, - ## - ## u'(x4) = (u(x5) - u(x3))/2 - ## - ## = (u(x1) - u(x3))/2 - ## - ## So the last row of S will be, - ## - ## 2*S4 = [1, 0, -1, 0] - ## - ## Each row 'i' in between will have [-1, 0, 1] beginning at column - ## (i-1). So finally, - ## - ## 2*S = [0, 1, 0, -1] - ## [-1, 0, 1, 0] - ## [0, -1, 0, 1] - ## [1, 0, -1, 0] + % + % The numerical solution of the advection-diffusion equation, + % + % -d*u''(x) + v*u'(x) + r*u = f(x) + % + % in one dimension, subject to the boundary conditions, + % + % u(x0) = u(xN) + % + % u'(x0) = u'(xN) + % + % over the interval [x0,xN] gives rise to a linear system: + % + % AU = h^2*F + % + % where h = 1/n, and A is given by, + % + % A = d*K + v*h*S + r*h^2*I. + % + % We will call the matrix S the "advection matrix," and it will be + % understood that the first row (corresponding to j=0) is to be + % omitted; since we have assumed that when j=0, u(xj) = u(x0) = + % u(xN) and likewise for u'. ignored (i.e, added later). + % + % INPUTS: + % + % * ``integerN`` - An integer representing the number of + % subintervals we should use to approximate `u`. Must be greater + % than or equal to 2, since we have at least two values for u(x0) + % and u(xN). + % + % * ``f`` - The function on the right hand side of the poisson + % equation. + % + % * ``x0`` - The initial point. + % + % * ``xN`` - The terminal point. + % + % OUTPUTS: + % + % * ``S`` - The NxN matrix of coefficients for the vector [u(x1), + % ..., u(xN)]. + % + % EXAMPLES: + % + % For integerN=4, x0=0, and x1=1, we will have four subintervals: + % + % [0, 0.25], [0.25, 0.5], [0.5, 0.75], [0.75, 1] + % + % The first row of the matrix 'S' should compute the "derivative" + % at x1=0.25. By the finite difference formula, this is, + % + % u'(x1) = (u(x2) - u(x0))/2 + % + % = (u(x2) - u(x4))/2 + % + % Therefore, the first row of 'S' should look like, + % + % 2*S1 = [0, 1, 0, -1] + % + % and of course we would have F1 = [0] on the right-hand side. + % Likewise, the last row of S should correspond to, + % + % u'(x4) = (u(x5) - u(x3))/2 + % + % = (u(x1) - u(x3))/2 + % + % So the last row of S will be, + % + % 2*S4 = [1, 0, -1, 0] + % + % Each row 'i' in between will have [-1, 0, 1] beginning at column + % (i-1). 
So finally, + % + % 2*S = [0, 1, 0, -1] + % [-1, 0, 1, 0] + % [0, -1, 0, 1] + % [1, 0, -1, 0] if (integerN < 2) S = NA; return; end @@ -85,28 +85,28 @@ function S = advection_matrix(integerN, x0, xN) [xs,h] = partition(integerN, x0, xN); - ## We cannot evaluate u_xx at the endpoints because our - ## differentiation algorithm relies on the points directly to the - ## left and right of `x`. Since we're starting at j=1 anyway, we cut - ## off two from the beginning. + % We cannot evaluate u_xx at the endpoints because our + % differentiation algorithm relies on the points directly to the + % left and right of `x`. Since we're starting at j=1 anyway, we cut + % off two from the beginning. differentiable_points = xs(3:end-1); - ## These are the coefficient vectors for the u(x0) and u(xn) - ## constraints. There should be N zeros and a single 1. + % These are the coefficient vectors for the u(x0) and u(xn) + % constraints. There should be N zeros and a single 1. the_rest_zeros = zeros(1, integerN - 3); u_x0_coeffs = cat(2, the_rest_zeros, [0.5, 0, -0.5]); u_xN_coeffs = cat(2, [0.5, 0, -0.5], the_rest_zeros); - ## Start with the u(x0) row. + % Start with the u(x0) row. S = u_x0_coeffs; for x = differentiable_points - ## Append each row obtained from the forward Euler method to S. - ## Chop off x0 first. + % Append each row obtained from the forward Euler method to S. + % Chop off x0 first. u_row = central_difference(xs(2:end), x); S = cat(1, S, u_row); end - ## Finally, append the last row for xN. + % Finally, append the last row for xN. S = cat(1, S, u_xN_coeffs); end diff --git a/advection_matrix_sparse.m b/advection_matrix_sparse.m index a89536c..dea3e77 100644 --- a/advection_matrix_sparse.m +++ b/advection_matrix_sparse.m @@ -1,29 +1,29 @@ function S = advection_matrix_sparse(integerN) - ## - ## Sparse version of the advection_matrix function. See - ## advection_matrix.m for details. - ## + % + % Sparse version of the advection_matrix function. See + % advection_matrix.m for details. + % if (integerN < 2) S = NA; return end - ## The ones directly above the diagonal. + % The ones directly above the diagonal. top = [ [zeros(integerN-1, 1), speye(integerN-1)]; ... zeros(1, integerN)]; - ## The negative ones directly below the diagonal. + % The negative ones directly below the diagonal. bottom = [ [zeros(1, integerN-1); ... -speye(integerN-1) ], zeros(integerN, 1)]; - ## Combine the top and bottom. + % Combine the top and bottom. S = top + bottom; - ## Fill in the entries in the corner. + % Fill in the entries in the corner. S(1, integerN) = -1; S(integerN, 1) = 1; - ## And divide the whole thing by 2. + % And divide the whole thing by 2. S = (1/2)*S; end diff --git a/c_inner_product.m b/c_inner_product.m index 1d7bfd1..b53deef 100644 --- a/c_inner_product.m +++ b/c_inner_product.m @@ -1,24 +1,24 @@ function ip = c_inner_product(w, a, b, v1, v2) - ## - ## The usual inner product defined on the space of continuous - ## functions over the interval [a,b]. - ## - ## INPUT: - ## - ## * ``w`` -- The weight function. - ## - ## * ``a`` -- The left endpoint of the interval. - ## - ## * ``b`` -- The right endpoint of the interval. - ## - ## * ``v1`` -- The first vector. - ## - ## * ``v2`` -- The second vector. - ## - ## OUTPUT: - ## - ## The inner product <v1, v2>. - ## + % + % The usual inner product defined on the space of continuous + % functions over the interval [a,b]. + % + % INPUT: + % + % * ``w`` -- The weight function. + % + % * ``a`` -- The left endpoint of the interval.
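As a concrete check of the EXAMPLES section of advection_matrix.m above, here is a short Octave sketch; it assumes the patched advection_matrix.m, advection_matrix_sparse.m, and their helpers are on the load path:

    % Verify the 4x4 example from the advection_matrix docstring.
    S = advection_matrix(4, 0, 1);
    expected = (1/2) * [ 0,  1,  0, -1;
                        -1,  0,  1,  0;
                         0, -1,  0,  1;
                         1,  0, -1,  0 ];
    assert(isequal(S, expected));
    % The sparse builder should agree with the dense one.
    assert(isequal(full(advection_matrix_sparse(4)), expected));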
+ % + % * ``b`` -- The right endpoint of the interval. + % + % * ``v1`` -- The first vector. + % + % * ``v2`` -- The second vector. + % + % OUTPUT: + % + % The inner product <v1, v2>. + % integrand = @(x) w(x)*v1(x)*v2(x); ip = quad(integrand, a, b); end diff --git a/c_norm.m b/c_norm.m index 865ff8b..5a4fdc1 100644 --- a/c_norm.m +++ b/c_norm.m @@ -1,20 +1,20 @@ function c_norm = c_norm(w, a, b, v) - ## - ## The norm on C[a,b] induced by c_inner_product. - ## - ## INPUT: - ## - ## * ``w`` -- The weight function. - ## - ## * ``a`` -- The left endpoint of the interval. - ## - ## * ``b`` -- The right endpoint of the interval. - ## - ## * ``v`` -- The vector. - ## - ## OUTPUT: - ## - ## The norm of `v`; that is, the inner product sqrt(<v, v>). - ## + % + % The norm on C[a,b] induced by c_inner_product. + % + % INPUT: + % + % * ``w`` -- The weight function. + % + % * ``a`` -- The left endpoint of the interval. + % + % * ``b`` -- The right endpoint of the interval. + % + % * ``v`` -- The vector. + % + % OUTPUT: + % + % The norm of `v`; that is, the inner product sqrt(<v, v>). + % c_norm = sqrt(c_inner_product(w, a, b, v, v)); end diff --git a/central_difference.m b/central_difference.m index ad58fd4..ec9c7ed 100644 --- a/central_difference.m +++ b/central_difference.m @@ -1,34 +1,34 @@ function coefficients = central_difference(xs, x) - ## - ## The first order central difference at x1 is, - ## - ## f'(x1) = (f(x2) - f(x0))/2 - ## - ## where the index x1 is of course arbitrary but x2, x0 are adjacent - ## to x1. The coefficients we seek are the coefficients of f(xj) for - ## j = 1,...,N-2, where N is the length of ``xs``. We omit the first - ## and last coefficient because at x0 and xN, the previous/next - ## value is not available. - ## - ## This should probably take an 'order' parameter as well; see - ## forward_euler(). - ## - ## INPUT: - ## - ## * ``xs`` - The vector of x-coordinates. - ## - ## * ``x`` - The point `x` at which you'd like to evaluate the - ## derivative of the specified `integer_order`. This should be an - ## element of `xs`. - ## - ## OUTPUT: - ## - ## * ``coefficients`` - The vector of coefficients, in order, of - ## f(x0), f(x1), ..., f(xn). - ## + % + % The first order central difference at x1 is, + % + % f'(x1) = (f(x2) - f(x0))/2 + % + % where the index x1 is of course arbitrary but x2, x0 are adjacent + % to x1. The coefficients we seek are the coefficients of f(xj) for + % j = 1,...,N-2, where N is the length of ``xs``. We omit the first + % and last coefficient because at x0 and xN, the previous/next + % value is not available. + % + % This should probably take an 'order' parameter as well; see + % forward_euler(). + % + % INPUT: + % + % * ``xs`` - The vector of x-coordinates. + % + % * ``x`` - The point `x` at which you'd like to evaluate the + % derivative of the specified `integer_order`. This should be an + % element of `xs`. + % + % OUTPUT: + % + % * ``coefficients`` - The vector of coefficients, in order, of + % f(x0), f(x1), ..., f(xn). + % if (length(xs) < 3) - ## We need at least one point other than the first and last. + % We need at least one point other than the first and last. coefficients = NA; return; end @@ -36,16 +36,16 @@ function coefficients = central_difference(xs, x) x_idx = find(xs == x); if (x_idx == 1 || x_idx == length(xs)) - ## You asked for the difference at the first or last element, which - ## we can't do. + % You asked for the difference at the first or last element, which + % we can't do. coefficients = NA; return; end - ## Start with a vector of zeros.
+ % Start with a vector of zeros. coefficients = zeros(1, length(xs)); - ## And fill in the two values that we know. + % And fill in the two values that we know. coefficients(x_idx - 1) = -1/2; coefficients(x_idx + 1) = 1/2; end diff --git a/diffusion_matrix_sparse.m b/diffusion_matrix_sparse.m index dbe4e2a..600363c 100644 --- a/diffusion_matrix_sparse.m +++ b/diffusion_matrix_sparse.m @@ -1,26 +1,26 @@ function K = diffusion_matrix_sparse(integerN) - ## - ## A sparse representation of the matrix K in the advection-diffusion - ## equation. See advection_matrix.m for details. - ## + % + % A sparse representation of the matrix K in the advection-diffusion + % equation. See advection_matrix.m for details. + % if (integerN < 2) K = NA; return end - ## The negative ones directly above the diagonal. + % The negative ones directly above the diagonal. top = [ [zeros(integerN-1, 1), -speye(integerN-1)]; ... zeros(1, integerN)]; - ## The negative ones directly below the diagonal. + % The negative ones directly below the diagonal. bottom = [ [zeros(1, integerN-1); ... -speye(integerN-1) ], zeros(integerN, 1)]; - ## Combine the top and bottom. + % Combine the top and bottom. K = top + bottom + 2*speye(integerN); - ## Fill in the entries in the corner. + % Fill in the entries in the corner. K(1, integerN) = -1; K(integerN, 1) = -1; end diff --git a/divided_difference.m b/divided_difference.m index 4705f0a..261abed 100644 --- a/divided_difference.m +++ b/divided_difference.m @@ -1,32 +1,32 @@ function dd = divided_difference(f, xs) - ## Compute divided difference of `f` at points `xs`. The argument `xs` - ## is assumed to be a vector containing at least one element. If it - ## contains n elements, the (n-1)st divided difference will be - ## calculated. - ## - ## INPUTS: - ## - ## * ``f`` - The function whose divided differences we want. - ## - ## * ``xs`` - A vector containing x-coordinates. The length of `xs` - ## determines the order of the divided difference. - ## - ## - ## OUTPUTS: - ## - ## * ``dd`` - The divided difference f[xs(1), xs(2),...] - ## + % Compute divided difference of `f` at points `xs`. The argument `xs` + % is assumed to be a vector containing at least one element. If it + % contains n elements, the (n-1)st divided difference will be + % calculated. + % + % INPUTS: + % + % * ``f`` - The function whose divided differences we want. + % + % * ``xs`` - A vector containing x-coordinates. The length of `xs` + % determines the order of the divided difference. + % + % + % OUTPUTS: + % + % * ``dd`` - The divided difference f[xs(1), xs(2),...] + % order = length(xs) - 1; if (order < 0) - ## Can't do anything here. Return nothing. + % Can't do anything here. Return nothing. dd = NA; elseif (order == 0) - ## Our base case. + % Our base case. dd = f(xs(1)); else - ## Order >= 1. + % Order >= 1. cs = divided_difference_coefficients(xs); dd = dot(cs, f(xs)); end diff --git a/divided_difference_coefficients.m b/divided_difference_coefficients.m index a40f559..92e9915 100644 --- a/divided_difference_coefficients.m +++ b/divided_difference_coefficients.m @@ -1,15 +1,15 @@ function coefficients = divided_difference_coefficients(xs) - ## Compute divided difference coefficients of `f` at points `xs`. - ## - ## INPUTS: - ## - ## * ``xs`` - A vector containing x-coordinates. - ## - ## OUTPUTS: - ## - ## * ``coefficients`` - The vector of coefficients such that - ## dot(coefficients, f(xs)) == f[xs]. Used to solve linear systems. - ## + % Compute divided difference coefficients of `f` at points `xs`. 
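To make the divided-difference docstrings above concrete, here is a small Octave sketch; it assumes divided_difference and divided_difference_coefficients behave as documented:

    % First divided difference of f(x) = x^2 at {1, 3}:
    % f[1,3] = (9 - 1)/(3 - 1) = 4.
    f = @(x) x.^2;
    assert(abs(divided_difference(f, [1, 3]) - 4) < sqrt(eps));
    % The second divided difference of a quadratic is its leading
    % coefficient, here 1.
    assert(abs(divided_difference(f, [1, 2, 3]) - 1) < sqrt(eps));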
+ % + % INPUTS: + % + % * ``xs`` - A vector containing x-coordinates. + % + % OUTPUTS: + % + % * ``coefficients`` - The vector of coefficients such that + % dot(coefficients, f(xs)) == f[xs]. Used to solve linear systems. + % coefficients = []; diff --git a/envelope.m b/envelope.m index e54f818..fd66842 100644 --- a/envelope.m +++ b/envelope.m @@ -1,26 +1,26 @@ function envelope = envelope(A) - ## Compute the envelope of the matrix ``A``. The envelope of a matrix - ## is defined as the set of indices, - ## - ## E = { (i,j) : i < j, A(k,j) != 0 for some k <= i } - ## + % Compute the envelope of the matrix ``A``. The envelope of a matrix + % is defined as the set of indices, + % + % E = { (i,j) : i < j, A(k,j) != 0 for some k <= i } + % if (!issymmetric(A) && !is_upper_triangular(A)) - ## The envelope of a matrix is only defined for U-T or symmetric - ## matrices. + % The envelope of a matrix is only defined for U-T or symmetric + % matrices. envelope = {NA}; return; end - ## Start with an empty result, and append to it as we find - ## satisfactory indices. + % Start with an empty result, and append to it as we find + % satisfactory indices. envelope = {}; for j = [ 1 : columns(A) ] - ## Everything below the first non-zero element in a column will be - ## part of the envelope. Since we're moving from top to bottom, we - ## can simply set a flag indicating that we've found the first - ## non-zero element. Thereafter, everything we encounter should be - ## added to the envelope. + % Everything below the first non-zero element in a column will be + % part of the envelope. Since we're moving from top to bottom, we + % can simply set a flag indicating that we've found the first + % non-zero element. Thereafter, everything we encounter should be + % added to the envelope. found_nonzero = false; for i = [ 1 : j-1 ] diff --git a/fixed_point_method.m b/fixed_point_method.m index eb3b61c..a70a1ac 100644 --- a/fixed_point_method.m +++ b/fixed_point_method.m @@ -1,21 +1,21 @@ function [fixed_point, iterations] = fixed_point_method(g, epsilon, x0) - ## Find a fixed_point of the function `g` with initial guess x0. - ## - ## INPUTS: - ## - ## * ``g`` - The function to iterate. - ## - ## * ``epsilon`` - We stop when two successive iterations are within - ## epsilon of each other, taken under the infinity norm. halt the - ## search and return the current approximation. - ## - ## OUTPUTS: - ## - ## * ``fixed_point`` - The fixed point that we found. - ## - ## * ``iterations`` - The number of iterations that we performed - ## during the search. - ## + % Find a fixed_point of the function `g` with initial guess x0. + % + % INPUTS: + % + % * ``g`` - The function to iterate. + % + % * ``epsilon`` - We stop when two successive iterations are within + % epsilon of each other, taken under the infinity norm. halt the + % search and return the current approximation. + % + % OUTPUTS: + % + % * ``fixed_point`` - The fixed point that we found. + % + % * ``iterations`` - The number of iterations that we performed + % during the search. + % iterations = 0; prev = x0; diff --git a/forward_euler.m b/forward_euler.m index ff9c5eb..6984cfc 100644 --- a/forward_euler.m +++ b/forward_euler.m @@ -1,36 +1,36 @@ function coefficients = forward_euler(integer_order, xs, x) - ## - ## Return the coefficients of u(x0), u(x1), ..., u(xn) as a vector. 
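A minimal usage sketch for fixed_point_method, assuming the implementation returns the most recent iterate:

    % Iterating g(x) = cos(x) converges to the Dottie number 0.739085...
    g = @(x) cos(x);
    [fp, iterations] = fixed_point_method(g, 1e-10, 0.5);
    assert(abs(fp - cos(fp)) < 1e-8);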
- ## Take for example a first order approximation, with, - ## - ## xs = [x0,x1,x2,x3,x4] - ## - ## f'(x1) ~= [f(x2)-f(x1)]/(x2-x1) - ## - ## This would return [0, -1/(x2-x1), 2/(x2-x1), 0, 0]. This aids the - ## solution of linear systems. - ## - ## - ## INPUTS: - ## - ## * ``integer_order`` - The order of the derivative which we're - ## approximating. - ## - ## * ``xs`` - The vector of x-coordinates. - ## - ## * ``x`` - The point `x` at which you'd like to evaluate the - ## derivative of the specified `integer_order`. This should be an - ## element of `xs`. - ## - ## - ## OUTPUTS: - ## - ## * ``coefficients`` - The vector of coefficients, in order, of - ## f(x0), f(x1), ..., f(xn). - ## + % + % Return the coefficients of u(x0), u(x1), ..., u(xn) as a vector. + % Take for example a first order approximation, with, + % + % xs = [x0,x1,x2,x3,x4] + % + % f'(x1) ~= [f(x2)-f(x1)]/(x2-x1) + % + % This would return [0, -1/(x2-x1), 2/(x2-x1), 0, 0]. This aids the + % solution of linear systems. + % + % + % INPUTS: + % + % * ``integer_order`` - The order of the derivative which we're + % approximating. + % + % * ``xs`` - The vector of x-coordinates. + % + % * ``x`` - The point `x` at which you'd like to evaluate the + % derivative of the specified `integer_order`. This should be an + % element of `xs`. + % + % + % OUTPUTS: + % + % * ``coefficients`` - The vector of coefficients, in order, of + % f(x0), f(x1), ..., f(xn). + % if (integer_order < 0) - ## You have made a grave mistake. + % You have made a grave mistake. coefficients = NA; return; end @@ -41,8 +41,8 @@ function coefficients = forward_euler(integer_order, xs, x) end if (length(xs) < 2) - ## You can't approximate a derivative of order greater than zero - ## with zero or one points! + % You can't approximate a derivative of order greater than zero + % with zero or one points! coefficients = NA return; end @@ -51,15 +51,15 @@ function coefficients = forward_euler(integer_order, xs, x) offset_b = integer_order / 2; offset_f = offset_b; else - ## When the order is odd, we need one more "forward" point than we - ## do "backward" points. + % When the order is odd, we need one more "forward" point than we + % do "backward" points. offset_b = (integer_order - 1) / 2; offset_f = offset_b + 1; end - ## Zero out the coefficients for terms that won't appear. We compute - ## where `x` is, and we just computed how far back/forward we need to - ## look from `x`, so we just need to make the rest zeros. + % Zero out the coefficients for terms that won't appear. We compute + % where `x` is, and we just computed how far back/forward we need to + % look from `x`, so we just need to make the rest zeros. x_idx = find(xs == x); first_nonzero_idx = x_idx - offset_b; last_nonzero_idx = x_idx + offset_f; @@ -70,7 +70,7 @@ function coefficients = forward_euler(integer_order, xs, x) targets = xs(first_nonzero_idx : last_nonzero_idx); - # The multiplier comes from the Taylor expansion. + % The multiplier comes from the Taylor expansion. multiplier = factorial(integer_order); cs = divided_difference_coefficients(targets) * multiplier; diff --git a/forward_euler1.m b/forward_euler1.m index 3e1061d..2c1fb22 100644 --- a/forward_euler1.m +++ b/forward_euler1.m @@ -1,20 +1,20 @@ function y = forward_euler1(x0, y0, f, h) - ## Compute one iteration of the forward Euler method. - ## - ## INPUT: - ## - ## * ``x0`` - The initial x-coordinate. - ## - ## * ``y0`` - The initial value y(x0). - ## - ## * ``f`` - The function y' = f(x,y) of two variables. 
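A sketch of the coefficient convention just described, on a uniform grid; the expected values assume forward_euler fills in the scaled divided-difference weights as documented:

    % First-derivative coefficients at x1 = 0.25 with h = 0.25: only
    % f(x1) and f(x2) get weights, -1/h and +1/h (so the docstring's
    % "2/(x2-x1)" appears to be a typo for 1/(x2-x1)).
    xs = [0, 0.25, 0.5, 0.75, 1.0];
    c = forward_euler(1, xs, 0.25);
    assert(norm(c(:).' - [0, -4, 4, 0, 0]) < sqrt(eps));
    % dot(c, f(xs)) approximates f'(0.25); for f(x) = x^2 it gives
    % (f(0.5) - f(0.25))/0.25 = 0.75 against the true value 0.5.
    f = @(x) x.^2;
    assert(abs(dot(c, f(xs)) - 0.75) < sqrt(eps));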
- ## - ## * ``h`` - The step size. - ## - ## OUTPUT: - ## - ## The approximate value of y(x) at x = x0+h. - ## + % Compute one iteration of the forward Euler method. + % + % INPUT: + % + % * ``x0`` - The initial x-coordinate. + % + % * ``y0`` - The initial value y(x0). + % + % * ``f`` - The function y' = f(x,y) of two variables. + % + % * ``h`` - The step size. + % + % OUTPUT: + % + % The approximate value of y(x) at x = x0+h. + % y_prime = f(x0,y0); y = y0 + h * y_prime; end diff --git a/is_upper_triangular.m b/is_upper_triangular.m index b78991d..a5042e1 100644 --- a/is_upper_triangular.m +++ b/is_upper_triangular.m @@ -1,4 +1,6 @@ function isUT = is_upper_triangular(A) - ## Returns true if ``A`` is upper triangular, false otherwise. + % + % Returns true if ``A`` is upper triangular, false otherwise. + % isUT = isequal(A, triu(A)); end diff --git a/legendre_p.m b/legendre_p.m index 0481d47..36f6761 100644 --- a/legendre_p.m +++ b/legendre_p.m @@ -1,25 +1,25 @@ function P = legendre_p(n) - ## Return the `n`th Legendre polynomial. - ## - ## INPUT: - ## - ## * ``n`` - The index of the polynomial that we want. - ## - ## OUTPUT: - ## - ## * ``P`` - A polynomial function of one argument. - ## + % Return the `n`th Legendre polynomial. + % + % INPUT: + % + % * ``n`` - The index of the polynomial that we want. + % + % OUTPUT: + % + % * ``P`` - A polynomial function of one argument. + % if (n < 0) - ## Can't do anything here. Return nothing. + % Can't do anything here. Return nothing. P = NA; elseif (n == 0) - ## One of our base cases. + % One of our base cases. P = @(x) 1; elseif (n == 1) - ## The second base case. + % The second base case. P = @(x) x; else - ## Not one of the base cases, so use the recursive formula. + % Not one of the base cases, so use the recursive formula. prev = legendre_p(n-1); prev_prev = legendre_p(n-2); P = @(x) (1/n).*( (2*n - 1).*x.*prev(x) - (n-1).*prev_prev(x) ); diff --git a/legendre_p_tilde.m b/legendre_p_tilde.m index 2c9d2ab..3bb30be 100644 --- a/legendre_p_tilde.m +++ b/legendre_p_tilde.m @@ -1,24 +1,24 @@ function P_tilde = legendre_p_tilde(n, a, b) - ## Return the `n`th Legendre polynomial scaled to the interval [a,b]. - ## - ## INPUT: - ## - ## * ``n`` - The index of the polynomial that we want. - ## - ## * ``a`` - The left endpoint of the interval. - ## - ## * ``b`` - The right endpoint of the interval. - ## - ## OUTPUT: - ## - ## * ``P_tilde`` - A polynomial function of one argument. - ## + % Return the `n`th Legendre polynomial scaled to the interval [a,b]. + % + % INPUT: + % + % * ``n`` - The index of the polynomial that we want. + % + % * ``a`` - The left endpoint of the interval. + % + % * ``b`` - The right endpoint of the interval. + % + % OUTPUT: + % + % * ``P_tilde`` - A polynomial function of one argument. + % if (n < 0) - ## Can't do anything here. Return nothing. + % Can't do anything here. Return nothing. P = NA; else - ## Compute the Legendre polynomial over [-1,1] and mangle it to fit - ## the interval [a,b]. + % Compute the Legendre polynomial over [-1,1] and mangle it to fit + % the interval [a,b]. P = legendre_p(n); P_tilde = @(x) P( (2/(b-a)).*x + 1 - (2*b)/(b-a) ); end diff --git a/misc/even.m b/misc/even.m index f4f3767..c251e24 100644 --- a/misc/even.m +++ b/misc/even.m @@ -1,14 +1,14 @@ function even = even(integer_n) - ## Returns true if its argument is even; false otherwise. - ## - ## INPUTS: - ## - ## * ``integer_n`` - The integer whose parity you're determining. 
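A quick Octave check of the Legendre recursion above, which also exercises c_inner_product (orthogonality on [-1,1] under the unit weight):

    % The recursion should reproduce P2(x) = (3x^2 - 1)/2.
    P2 = legendre_p(2);
    xs = linspace(-1, 1, 9);
    assert(max(abs(P2(xs) - (3*xs.^2 - 1)/2)) < sqrt(eps));
    % P2 and P3 are orthogonal under the weight w(x) = 1.
    w = @(x) 1;
    assert(abs(c_inner_product(w, -1, 1, P2, legendre_p(3))) < sqrt(eps));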
- ## - ## - ## OUTPUTS: - ## - ## * ``even`` - True if `integer_n` is even, false otherwise. - ## + % Returns true if its argument is even; false otherwise. + % + % INPUTS: + % + % * ``integer_n`` - The integer whose parity you're determining. + % + % + % OUTPUTS: + % + % * ``even`` - True if `integer_n` is even, false otherwise. + % even = rem(integer_n, 2) == 0; end diff --git a/misc/odd.m b/misc/odd.m index b0bc626..c754a12 100644 --- a/misc/odd.m +++ b/misc/odd.m @@ -1,15 +1,15 @@ function odd = odd(integer_n) - ## Returns true if its argument is odd; false otherwise. - ## - ## INPUTS: - ## - ## * ``integer_n`` - The integer whose parity you're determining. - ## - ## - ## OUTPUTS: - ## - ## * ``odd`` - True if `integer_n` is odd, false otherwise. - ## - + % Returns true if its argument is odd; false otherwise. + % + % INPUTS: + % + % * ``integer_n`` - The integer whose parity you're determining. + % + % + % OUTPUTS: + % + % * ``odd`` - True if `integer_n` is odd, false otherwise. + % + odd = !even(integer_n); end diff --git a/newtons_method.m b/newtons_method.m index 8bdd090..2d33c1a 100644 --- a/newtons_method.m +++ b/newtons_method.m @@ -1,25 +1,25 @@ function [root, iterations] = newtons_method(f, f_prime, epsilon, x0) - ## Find a root of the function `f` with initial guess x0. - ## - ## INPUTS: - ## - ## * ``f`` - The function whose root we seek. Must return a column - ## vector or scalar. - ## - ## * ``f_prime`` - The derivative or Jacobian of ``f``. - ## - ## * ``epsilon`` - We stop when the value of `f` becomes less - ## than epsilon. - ## - ## * ``x0`` - An initial guess. Either 1d or a column vector. - ## - ## OUTPUTS: - ## - ## * ``root`` - The root that we found. - ## - ## * ``iterations`` - The number of iterations that we performed - ## during the search. - ## + % Find a root of the function `f` with initial guess x0. + % + % INPUTS: + % + % * ``f`` - The function whose root we seek. Must return a column + % vector or scalar. + % + % * ``f_prime`` - The derivative or Jacobian of ``f``. + % + % * ``epsilon`` - We stop when the value of `f` becomes less + % than epsilon. + % + % * ``x0`` - An initial guess. Either 1d or a column vector. + % + % OUTPUTS: + % + % * ``root`` - The root that we found. + % + % * ``iterations`` - The number of iterations that we performed + % during the search. + % if (columns(x0) > 1) root = NA; @@ -31,8 +31,8 @@ function [root, iterations] = newtons_method(f, f_prime, epsilon, x0) f_val = f(x0); while (norm(f_val, Inf) > epsilon) - ## This uses the modified algorithm (2.11.5) in Atkinson. - ## Should work in 1d, too. + % This uses the modified algorithm (2.11.5) in Atkinson. + % Should work in 1d, too. delta = f_prime(xn) \ f_val; xn = xn - delta; f_val = f(xn); diff --git a/optimization/step_length_cgm.m b/optimization/step_length_cgm.m index 60f7cd4..87062f3 100644 --- a/optimization/step_length_cgm.m +++ b/optimization/step_length_cgm.m @@ -1,40 +1,40 @@ function alpha = step_length_cgm(r, A, p) - ## - ## Compute the step length for the conjugate gradient method (CGM). - ## The CGM attempts to solve, - ## - ## Ax = b - ## - ## or equivalently, - ## - ## min[phi(x) = (1/2) - ] - ## - ## where ``A`` is positive-definite. In the process, we need to - ## compute a number of search directions ``p`` and optimal step - ## lengths ``alpha``; i.e., - ## - ## x_{k+1} = x_{k} + alpha_{k}*p_{k} - ## - ## This function computes alpha_{k} in the formula above. - ## - ## INPUT: - ## - ## - ``r`` -- The residual, Ax - b, at the current step. 
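A minimal scalar example of newtons_method as documented above:

    % Solve x^2 = 2 starting from x0 = 1.
    f = @(x) x^2 - 2;
    f_prime = @(x) 2*x;
    [root, iterations] = newtons_method(f, f_prime, 1e-12, 1.0);
    assert(abs(root - sqrt(2)) < 1e-10);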
- ## - ## - ``A`` -- The matrix ``A`` in the formulation above. - ## - ## - ``p`` -- The current search direction. - ## - ## OUTPUT: - ## - ## - ``alpha`` -- The minimizer of ``phi(alpha) = f(x + alpha*p)`` along ``p``. - ## - ## NOTES: - ## - ## All vectors are assumed to be *column* vectors. - ## + % + % Compute the step length for the conjugate gradient method (CGM). + % The CGM attempts to solve, + % + % Ax = b + % + % or equivalently, + % + % min[phi(x) = (1/2)<Ax, x> - <b, x>] + % + % where ``A`` is positive-definite. In the process, we need to + % compute a number of search directions ``p`` and optimal step + % lengths ``alpha``; i.e., + % + % x_{k+1} = x_{k} + alpha_{k}*p_{k} + % + % This function computes alpha_{k} in the formula above. + % + % INPUT: + % + % - ``r`` -- The residual, Ax - b, at the current step. + % + % - ``A`` -- The matrix ``A`` in the formulation above. + % + % - ``p`` -- The current search direction. + % + % OUTPUT: + % + % - ``alpha`` -- The minimizer of ``phi(alpha) = f(x + alpha*p)`` along ``p``. + % + % NOTES: + % + % All vectors are assumed to be *column* vectors. + % - ## A simple calculation should convince you that the gradient of - ## phi(x) above is Ax - b == r. + % A simple calculation should convince you that the gradient of + % phi(x) above is Ax - b == r. alpha = step_length_positive_definite(r, A, p); end diff --git a/optimization/step_length_positive_definite.m b/optimization/step_length_positive_definite.m index f6248f6..ad63956 100644 --- a/optimization/step_length_positive_definite.m +++ b/optimization/step_length_positive_definite.m @@ -1,36 +1,36 @@ function alpha = step_length_positive_definite(g, Q, p) - ## - ## Find the minimizer alpha of, - ## - ## phi(alpha) = f(x + alpha*p) - ## - ## where ``p`` is a descent direction, - ## - ## f(x) = (1/2)<Qx, x> - <b, x> - ## - ## and ``Q`` is positive-definite. - ## - ## The closed-form solution to this problem is given in Nocedal and - ## Wright, (3.55). - ## - ## INPUT: - ## - ## - ``g`` -- The gradient of f at x. - ## - ## - ``Q`` -- The positive-definite matrix in the definition of - ## ``f`` above. - ## - ## - ``p`` -- The direction in which ``f`` decreases. The line - ## along which we minimize f(x + alpha*p). - ## - ## OUTPUT: - ## - ## - ``alpha`` -- The value which causes ``f`` to decrease the - ## most. - ## - ## NOTES: - ## - ## All vectors are assumed to be *column* vectors. - ## + % + % Find the minimizer alpha of, + % + % phi(alpha) = f(x + alpha*p) + % + % where ``p`` is a descent direction, + % + % f(x) = (1/2)<Qx, x> - <b, x> + % + % and ``Q`` is positive-definite. + % + % The closed-form solution to this problem is given in Nocedal and + % Wright, (3.55). + % + % INPUT: + % + % - ``g`` -- The gradient of f at x. + % + % - ``Q`` -- The positive-definite matrix in the definition of + % ``f`` above. + % + % - ``p`` -- The direction in which ``f`` decreases. The line + % along which we minimize f(x + alpha*p). + % + % OUTPUT: + % + % - ``alpha`` -- The value which causes ``f`` to decrease the + % most. + % + % NOTES: + % + % All vectors are assumed to be *column* vectors. + % alpha = -(g' * p)/(p' * Q * p); end diff --git a/optimization/test_functions/extended_powell1.m b/optimization/test_functions/extended_powell1.m index 2b1da25..2a53f89 100644 --- a/optimization/test_functions/extended_powell1.m +++ b/optimization/test_functions/extended_powell1.m @@ -1,17 +1,17 @@ function f = extended_powell1(x) - ## - ## The extended Powell function. See Dennis & Schnabel, Appendix B, - ## problem #2.
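A short sketch of the closed-form step length above, using a hypothetical 2x2 positive-definite Q:

    % Exact line search for f(x) = (1/2)<Qx,x> - <b,x> from x = 0 along
    % the steepest-descent direction.
    Q = [2, 0; 0, 4];
    b = [1; 1];
    x = [0; 0];
    g = Q*x - b;     % gradient of f at x (equals the residual r)
    p = -g;          % descent direction
    alpha = step_length_positive_definite(g, Q, p);
    % At the minimizer along p, the new gradient is orthogonal to p.
    assert(abs((Q*(x + alpha*p) - b)' * p) < sqrt(eps));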
- ## - ## This function has a minimum at x=(0,0,...,0) with f(x) == 0. The - ## suggested starting point is x0=(3, -1, 0, 1,..., 3, -1, 0, 1). - ## Since the number of arguments is variable, we take a vector - ## instead of its individual components. - ## + % + % The extended Powell function. See Dennis & Schnabel, Appendix B, + % problem #2. + % + % This function has a minimum at x=(0,0,...,0) with f(x) == 0. The + % suggested starting point is x0=(3, -1, 0, 1,..., 3, -1, 0, 1). + % Since the number of arguments is variable, we take a vector + % instead of its individual components. + % n = length(x); if (odd(n)) - ## 'm' below must be an integer. + % 'm' below must be an integer. f = NA; return; end diff --git a/optimization/test_functions/extended_powell_gradient1.m b/optimization/test_functions/extended_powell_gradient1.m index 7cc7aab..f60d5a8 100644 --- a/optimization/test_functions/extended_powell_gradient1.m +++ b/optimization/test_functions/extended_powell_gradient1.m @@ -1,15 +1,15 @@ function g = extended_powell_gradient1(x) - ## - ## The gradient of the extended Powell function. See - ## extended_powell1.m for more information. - ## - ## Since the number of arguments is variable, we take a vector - ## instead of its individual components. - ## + % + % The gradient of the extended Powell function. See + % extended_powell1.m for more information. + % + % Since the number of arguments is variable, we take a vector + % instead of its individual components. + % n = length(x); if (odd(n)) - ## 'm' below must be an integer. + % 'm' below must be an integer. g = NA; return; end diff --git a/optimization/test_functions/extended_powell_hessian1.m b/optimization/test_functions/extended_powell_hessian1.m index 972e690..4d5f50f 100644 --- a/optimization/test_functions/extended_powell_hessian1.m +++ b/optimization/test_functions/extended_powell_hessian1.m @@ -1,15 +1,15 @@ function H = extended_powell_hessian1(x) - ## - ## The Hessian of the extended Powell function. See - ## extended_powell1.m for more information. - ## - ## Since the number of arguments is variable, we take a vector - ## instead of its individual components. - ## + % + % The Hessian of the extended Powell function. See + % extended_powell1.m for more information. + % + % Since the number of arguments is variable, we take a vector + % instead of its individual components. + % n = length(x); if (odd(n)) - ## 'm' below must be an integer. + % 'm' below must be an integer. H = NA; return; end diff --git a/optimization/test_functions/extended_rosenbrock1.m b/optimization/test_functions/extended_rosenbrock1.m index 1f54520..8d0b4f8 100644 --- a/optimization/test_functions/extended_rosenbrock1.m +++ b/optimization/test_functions/extended_rosenbrock1.m @@ -1,17 +1,17 @@ function f = extended_rosenbrock1(x) - ## - ## The extended Rosenbrock function. See Dennis & Schnabel, Appendix - ## B, problem #1. - ## - ## This function has a minimum at x=(1,1,...,1) with f(x) == 0. The - ## suggested starting point is x0=(-1.2, 1,-1.2, 1,...,-1.2, 1). - ## Since the number of arguments is variable, we take a vector - ## instead of its individual components. - ## + % + % The extended Rosenbrock function. See Dennis & Schnabel, Appendix + % B, problem #1. + % + % This function has a minimum at x=(1,1,...,1) with f(x) == 0. The + % suggested starting point is x0=(-1.2, 1,-1.2, 1,...,-1.2, 1). + % Since the number of arguments is variable, we take a vector + % instead of its individual components. 
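A sketch of the documented minima and the odd-length guard; the exact zeros assume the implementations follow Dennis & Schnabel's formulas:

    % Both extended test functions vanish at their documented minimizers.
    assert(extended_powell1(zeros(4, 1)) == 0);
    assert(extended_rosenbrock1(ones(4, 1)) == 0);
    % Odd-length inputs are rejected, per the guard above.
    assert(isna(extended_powell1(zeros(3, 1))));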
+ % n = length(x); if (odd(n)) - ## 'm' below must be an integer. + % 'm' below must be an integer. f = NA; return; end diff --git a/optimization/test_functions/extended_rosenbrock_gradient1.m b/optimization/test_functions/extended_rosenbrock_gradient1.m index 2fd3c8c..cbda9a8 100644 --- a/optimization/test_functions/extended_rosenbrock_gradient1.m +++ b/optimization/test_functions/extended_rosenbrock_gradient1.m @@ -1,15 +1,15 @@ function g = extended_rosenbrock_gradient1(x) - ## - ## The gradient of the extended Rosenbrock function. See - ## extended_rosenbrock1.m for more information. - ## - ## Since the number of arguments is variable, we take a vector - ## instead of its individual components. - ## + % + % The gradient of the extended Rosenbrock function. See + % extended_rosenbrock1.m for more information. + % + % Since the number of arguments is variable, we take a vector + % instead of its individual components. + % n = length(x); if (odd(n)) - ## 'm' below must be an integer. + % 'm' below must be an integer. g = NA; return; end diff --git a/optimization/test_functions/extended_rosenbrock_hessian1.m b/optimization/test_functions/extended_rosenbrock_hessian1.m index 9c3415d..541613d 100644 --- a/optimization/test_functions/extended_rosenbrock_hessian1.m +++ b/optimization/test_functions/extended_rosenbrock_hessian1.m @@ -1,15 +1,15 @@ function H = extended_rosenbrock_hessian1(x) - ## - ## The Hessian of the extended Rosenbrock function. See - ## extended_rosenbrock1.m for more information. - ## - ## Since the number of arguments is variable, we take a vector - ## instead of its individual components. - ## + % + % The Hessian of the extended Rosenbrock function. See + % extended_rosenbrock1.m for more information. + % + % Since the number of arguments is variable, we take a vector + % instead of its individual components. + % n = length(x); if (odd(n)) - ## 'm' below must be an integer. + % 'm' below must be an integer. H = NA; return; end diff --git a/optimization/test_functions/himmelblau.m b/optimization/test_functions/himmelblau.m index c973a86..c151ed6 100644 --- a/optimization/test_functions/himmelblau.m +++ b/optimization/test_functions/himmelblau.m @@ -1,17 +1,17 @@ function f = himmelblau(x1, x2) - ## - ## The eponymous function defined by Himmelblau in his Applied - ## Nonlinear Programming, 1972. - ## - ## It has four identical local minima, - ## - ## * f(3,2) == 0 - ## - ## * f(-2.805118086952745, 3.131312518250573) == 0 - ## - ## * f(-3.779310253377747, -3.283185991286170) == 0 - ## - ## * f(3.584428340330492, -1.848126526964404) == 0 - ## + % + % The eponymous function defined by Himmelblau in his Applied + % Nonlinear Programming, 1972. + % + % It has four identical local minima, + % + % * f(3,2) == 0 + % + % * f(-2.805118086952745, 3.131312518250573) == 0 + % + % * f(-3.779310253377747, -3.283185991286170) == 0 + % + % * f(3.584428340330492, -1.848126526964404) == 0 + % f = (x1^2 + x2 - 11)^2 + (x1 + x2^2 - 7)^2; end diff --git a/optimization/test_functions/himmelblau1.m b/optimization/test_functions/himmelblau1.m index c975af2..2598db7 100644 --- a/optimization/test_functions/himmelblau1.m +++ b/optimization/test_functions/himmelblau1.m @@ -1,9 +1,9 @@ function f = himmelblau1(x) - ## - ## A version of the Himmelblau function which takes a column - ## 2-vector instead of two distinct arguments. See himmelblau.m for - ## more information. - ## + % + % A version of the Himmelblau function which takes a column + % 2-vector instead of two distinct arguments. 
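A quick numerical confirmation of the Himmelblau minima listed above:

    % Two of the four minima, to machine precision.
    assert(himmelblau(3, 2) == 0);
    assert(abs(himmelblau(-2.805118086952745, 3.131312518250573)) < 1e-10);
    % The 2-vector wrapper agrees with the two-argument form.
    assert(himmelblau1([3; 2]) == himmelblau(3, 2));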
See himmelblau.m for + % more information. + % if (length(x) == 2) f = himmelblau(x(1), x(2)); else diff --git a/optimization/test_functions/himmelblau_gradient.m b/optimization/test_functions/himmelblau_gradient.m index b50e7b6..44b59f6 100644 --- a/optimization/test_functions/himmelblau_gradient.m +++ b/optimization/test_functions/himmelblau_gradient.m @@ -1,8 +1,8 @@ function g = himmelblau_gradient(x1,x2) - ## - ## The gradient of the Himmelblau function. See himmelblau.m for - ## more information. - ## + % + % The gradient of the Himmelblau function. See himmelblau.m for + % more information. + % f_x1 = 4*(x1^2 + x2 - 11)*x1 + 2*x2^2 + 2*x1 - 14; f_x2 = 4*(x2^2 + x1 - 7)*x2 + 2*x1^2 + 2*x2 - 22; g = [f_x1; f_x2]; diff --git a/optimization/test_functions/himmelblau_gradient1.m b/optimization/test_functions/himmelblau_gradient1.m index 91db28d..5637fa8 100644 --- a/optimization/test_functions/himmelblau_gradient1.m +++ b/optimization/test_functions/himmelblau_gradient1.m @@ -1,9 +1,9 @@ function g = himmelblau_gradient1(x) - ## - ## A version of the himmelblau_gradient() function which takes a - ## column 2-vector instead of two distinct arguments. See - ## himmelblau_gradient.m for more information. - ## + % + % A version of the himmelblau_gradient() function which takes a + % column 2-vector instead of two distinct arguments. See + % himmelblau_gradient.m for more information. + % if (length(x) == 2) g = himmelblau_gradient(x(1), x(2)); else diff --git a/optimization/test_functions/himmelblau_hessian.m b/optimization/test_functions/himmelblau_hessian.m index 24b277a..b846a6a 100644 --- a/optimization/test_functions/himmelblau_hessian.m +++ b/optimization/test_functions/himmelblau_hessian.m @@ -1,8 +1,8 @@ function H = himmelblau_hessian(x1, x2) - ## - ## The Hessian of the Himmelblau function. See himmelblau.m for more - ## information. - ## + % + % The Hessian of the Himmelblau function. See himmelblau.m for more + % information. + % H = zeros(2,2); H(1,1) = 12*x1^2 + 4*x2 - 42; H(1,2) = 4*x1 + 4*x2; diff --git a/optimization/test_functions/himmelblau_hessian1.m b/optimization/test_functions/himmelblau_hessian1.m index 84efb31..bf4b561 100644 --- a/optimization/test_functions/himmelblau_hessian1.m +++ b/optimization/test_functions/himmelblau_hessian1.m @@ -1,9 +1,9 @@ function H = himmelblau_hessian1(x) - ## - ## A version of the himmelblau_hessian() function which takes a column - ## 2-vector instead of two distinct arguments. See himmelblau_hessian.m - ## for more information. - ## + % + % A version of the himmelblau_hessian() function which takes a column + % 2-vector instead of two distinct arguments. See himmelblau_hessian.m + % for more information. + % if (length(x) == 2) H = himmelblau_hessian(x(1), x(2)); else diff --git a/optimization/test_functions/powell.m b/optimization/test_functions/powell.m index 5031316..5ea74e3 100644 --- a/optimization/test_functions/powell.m +++ b/optimization/test_functions/powell.m @@ -1,11 +1,11 @@ function f = powell(x1,x2,x3,x4) - ## The Powell function. See Dennis & Schnabel, Appendix B, problem - ## #1. (The "regular" Powell function is simply the Extended Powell - ## with m=1). - ## - ## This function has a minimum at x=(0,0,0,0) with f(x) == 0. The - ## suggested starting point is x0=(3,-1,0,1). - ## + % The Powell function. See Dennis & Schnabel, Appendix B, problem + % #1. (The "regular" Powell function is simply the Extended Powell + % with m=1). + % + % This function has a minimum at x=(0,0,0,0) with f(x) == 0. 
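A quick check of the minimum and suggested starting point from the Powell docstring:

    % The Powell function vanishes at the origin; at the suggested
    % start (3, -1, 0, 1) it evaluates to 49 + 5 + 1 + 160 = 215.
    assert(powell(0, 0, 0, 0) == 0);
    assert(powell(3, -1, 0, 1) == 215);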
The + % suggested starting point is x0=(3,-1,0,1). + % f = (x1 + 10*x2)^2 + 5*(x3 - x4)^2; f = f + (x2 - 2*x3)^4 + 10*(x1 - x4)^4; end diff --git a/optimization/test_functions/powell1.m b/optimization/test_functions/powell1.m index b5e1771..52937b5 100644 --- a/optimization/test_functions/powell1.m +++ b/optimization/test_functions/powell1.m @@ -1,8 +1,8 @@ function f = powell1(x) - ## - ## A version of the Powell function which takes a column 4-vector as - ## an argument instead of four distinct arguments. - ## + % + % A version of the Powell function which takes a column 4-vector as + % an argument instead of four distinct arguments. + % if (length(x) == 4) f = powell(x(1), x(2), x(3), x(4)); else diff --git a/optimization/test_functions/powell_gradient.m b/optimization/test_functions/powell_gradient.m index 7cea917..835f3fa 100644 --- a/optimization/test_functions/powell_gradient.m +++ b/optimization/test_functions/powell_gradient.m @@ -1,8 +1,8 @@ function g = powell_gradient(x1,x2,x3,x4) - ## - ## The gradient of the Powell function. See powell.m for more - ## information. - ## + % + % The gradient of the Powell function. See powell.m for more + % information. + % f_x1 = 40*(x1 - x4)^3 + 2*x1 + 20*x2; f_x2 = 4*(x2 - 2*x3)^3 + 20*x1 + 200*x2; f_x3 = -8*(x2 - 2*x3)^3 + 10*x3 - 10*x4; diff --git a/optimization/test_functions/powell_gradient1.m b/optimization/test_functions/powell_gradient1.m index e21b315..63868cd 100644 --- a/optimization/test_functions/powell_gradient1.m +++ b/optimization/test_functions/powell_gradient1.m @@ -1,9 +1,9 @@ function g = powell_gradient1(x) - ## - ## A version of the powell_gradient() function which takes a column - ## 4-vector instead of four distinct arguments. See - ## powell_gradient.m for more information. - ## + % + % A version of the powell_gradient() function which takes a column + % 4-vector instead of four distinct arguments. See + % powell_gradient.m for more information. + % if (length(x) == 4) g = powell_gradient(x(1), x(2), x(3), x(4)); else diff --git a/optimization/test_functions/powell_hessian.m b/optimization/test_functions/powell_hessian.m index 48cc390..248149c 100644 --- a/optimization/test_functions/powell_hessian.m +++ b/optimization/test_functions/powell_hessian.m @@ -1,8 +1,8 @@ function H = powell_hessian(x1, x2, x3, x4) - ## - ## The Hessian of the Powell function. See powell.m for more - ## information. - ## + % + % The Hessian of the Powell function. See powell.m for more + % information. + % H = zeros(4,4); H(1,1) = 120*(x1 - x4)^2 + 2; diff --git a/optimization/test_functions/powell_hessian1.m b/optimization/test_functions/powell_hessian1.m index b11d7f8..f734b07 100644 --- a/optimization/test_functions/powell_hessian1.m +++ b/optimization/test_functions/powell_hessian1.m @@ -1,9 +1,9 @@ function H = powell_hessian1(x) - ## - ## A version of the powell_hessian() function which takes a column - ## 4-vector instead of four distinct arguments. See powell_hessian.m - ## for more information. - ## + % + % A version of the powell_hessian() function which takes a column + % 4-vector instead of four distinct arguments. See powell_hessian.m + % for more information. 
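As a sanity check of the Powell derivative code above, a sketch assuming the gradient is returned as a 4-vector:

    % The gradient vanishes at the minimizer x = (0, 0, 0, 0).
    assert(norm(powell_gradient(0, 0, 0, 0)) == 0);
    % The column-vector wrapper agrees.
    assert(norm(powell_gradient1(zeros(4, 1))) == 0);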
+ % if (length(x) == 4) H = powell_hessian(x(1), x(2), x(3), x(4)); else diff --git a/optimization/test_functions/rosenbrock.m b/optimization/test_functions/rosenbrock.m index 8073a0a..22b2730 100644 --- a/optimization/test_functions/rosenbrock.m +++ b/optimization/test_functions/rosenbrock.m @@ -1,11 +1,11 @@ function f = rosenbrock(x1, x2) - ## - ## The Rosenbrock function. See Dennis & Schnabel, Appendix B, problem #1. - ## (The "regular" Rosenbrock function is simply the Extended Rosenbrock - ## with m=1). - ## - ## This function has a minimum at x=(1,1) with f(x) == 0. The - ## suggested starting point is x0=(-1.2, 1). - ## + % + % The Rosenbrock function. See Dennis & Schnabel, Appendix B, problem #1. + % (The "regular" Rosenbrock function is simply the Extended Rosenbrock + % with m=1). + % + % This function has a minimum at x=(1,1) with f(x) == 0. The + % suggested starting point is x0=(-1.2, 1). + % f = 100*(x2 - x1^2)^2 + (1 - x1)^2; end diff --git a/optimization/test_functions/rosenbrock1.m b/optimization/test_functions/rosenbrock1.m index 5e4df05..9aa69c6 100644 --- a/optimization/test_functions/rosenbrock1.m +++ b/optimization/test_functions/rosenbrock1.m @@ -1,9 +1,9 @@ function f = rosenbrock1(x) - ## - ## A version of the Rosenbrock function which takes a column - ## 2-vector instead of two distinct arguments. See rosenbrock.m for - ## more information. - ## + % + % A version of the Rosenbrock function which takes a column + % 2-vector instead of two distinct arguments. See rosenbrock.m for + % more information. + % if (length(x) == 2) f = rosenbrock(x(1), x(2)); else diff --git a/optimization/test_functions/rosenbrock_gradient.m b/optimization/test_functions/rosenbrock_gradient.m index 50c4546..2b741a4 100644 --- a/optimization/test_functions/rosenbrock_gradient.m +++ b/optimization/test_functions/rosenbrock_gradient.m @@ -1,8 +1,8 @@ function g = rosenbrock_gradient(x1, x2) - ## - ## The gradient of the Rosenbrock function. See rosenbrock.m for more - ## information. - ## + % + % The gradient of the Rosenbrock function. See rosenbrock.m for more + % information. + % f_x1 = -400*x1*(x2 - x1^2) + 2*x1 - 2; f_x2 = 200*(x2 - x1^2); g = [f_x1; f_x2]; diff --git a/optimization/test_functions/rosenbrock_gradient1.m b/optimization/test_functions/rosenbrock_gradient1.m index e8a83b6..29fa393 100644 --- a/optimization/test_functions/rosenbrock_gradient1.m +++ b/optimization/test_functions/rosenbrock_gradient1.m @@ -1,9 +1,9 @@ function g = rosenbrock_gradient1(x) - ## - ## A version of the rosenbrock_gradient() function which takes a - ## column 2-vector instead of two distinct arguments. See - ## rosenbrock_gradient.m for more information. - ## + % + % A version of the rosenbrock_gradient() function which takes a + % column 2-vector instead of two distinct arguments. See + % rosenbrock_gradient.m for more information. + % if (length(x) == 2) g = rosenbrock_gradient(x(1), x(2)); else diff --git a/optimization/test_functions/rosenbrock_hessian.m b/optimization/test_functions/rosenbrock_hessian.m index 066b5ae..ccbb84d 100644 --- a/optimization/test_functions/rosenbrock_hessian.m +++ b/optimization/test_functions/rosenbrock_hessian.m @@ -1,8 +1,8 @@ function H = rosenbrock_hessian(x1, x2) - ## - ## The Hessian of the Rosenbrock function. See rosenbrock.m for more - ## information. - ## + % + % The Hessian of the Rosenbrock function. See rosenbrock.m for more + % information. 
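A quick check of the Rosenbrock minimizer using the formulas above:

    % Both the function and its gradient vanish at (1, 1).
    assert(rosenbrock(1, 1) == 0);
    assert(isequal(rosenbrock_gradient(1, 1), [0; 0]));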
+ % H = zeros(2,2); H(1,1) = 1200*x1^2 - 400*x2 + 2; H(1,2) = -400*x1; diff --git a/optimization/test_functions/rosenbrock_hessian1.m b/optimization/test_functions/rosenbrock_hessian1.m index 63a105f..804098e 100644 --- a/optimization/test_functions/rosenbrock_hessian1.m +++ b/optimization/test_functions/rosenbrock_hessian1.m @@ -1,9 +1,9 @@ function H = rosenbrock_hessian1(x) - ## - ## A version of the rosenbrock_hessian() function which takes a column - ## 2-vector instead of two distinct arguments. See rosenbrock_hessian.m - ## for more information. - ## + % + % A version of the rosenbrock_hessian() function which takes a column + % 2-vector instead of two distinct arguments. See rosenbrock_hessian.m + % for more information. + % if (length(x) == 2) H = rosenbrock_hessian(x(1), x(2)); else diff --git a/optimization/test_functions/trigonometric1.m b/optimization/test_functions/trigonometric1.m index f7fd625..e6725ff 100644 --- a/optimization/test_functions/trigonometric1.m +++ b/optimization/test_functions/trigonometric1.m @@ -1,11 +1,11 @@ function f = trigonometric1(x) - ## - ## The trigonometric function. See More, Garbow, and Hillstrom, - ## function #26. - ## - ## This function has a minimum with f(x) == 0. The suggested - ## starting point is x0=(1/n, 1/n,...). - ## + % + % The trigonometric function. See More, Garbow, and Hillstrom, + % function #26. + % + % This function has a minimum with f(x) == 0. The suggested + % starting point is x0=(1/n, 1/n,...). + % n = length(x); f = 0; diff --git a/optimization/test_functions/trigonometric_gradient1.m b/optimization/test_functions/trigonometric_gradient1.m index 134ce86..845dc32 100644 --- a/optimization/test_functions/trigonometric_gradient1.m +++ b/optimization/test_functions/trigonometric_gradient1.m @@ -1,8 +1,8 @@ function g = trigonometric_gradient1(x) - ## - ## The gradient of the trigonometric function. See trigonometric1.m - ## for more information. - ## + % + % The gradient of the trigonometric function. See trigonometric1.m + % for more information. + % n = length(x); g = zeros(n,1); @@ -12,13 +12,13 @@ function g = trigonometric_gradient1(x) f_k = n - cos_sum + k*(1 - cos(x(k))) - sin(x(k)); for j = [ 1 : n ] - ## Add to the jth component of g the partial of f^2 with - ## respect to x(j). The first term that we add here exists - ## regardless of j. + % Add to the jth component of g the partial of f^2 with + % respect to x(j). The first term that we add here exists + % regardless of j. g(j) = g(j) + 2*f_k*sin(x(j)); if (j == k) - ## But this term vanishes when j != k. + % But this term vanishes when j != k. g(j) = g(j) + 2*f_k*k*sin(x(k)) - 2*f_k*cos(x(k)); end end diff --git a/optimization/test_functions/trigonometric_hessian1.m b/optimization/test_functions/trigonometric_hessian1.m index f3e40a4..8435a3a 100644 --- a/optimization/test_functions/trigonometric_hessian1.m +++ b/optimization/test_functions/trigonometric_hessian1.m @@ -1,9 +1,9 @@ function H = trigonometric_hessian1(x) - ## - ## The Hessian of the Trigonometric function. See trigonometric1.m - ## for more information. Not my implementation. Too ugly to - ## recreate. - ## + % + % The Hessian of the Trigonometric function. See trigonometric1.m + % for more information. Not my implementation. Too ugly to + % recreate. 
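Since the trigonometric family is harder to verify by eye, here is a finite-difference sketch; it assumes trigonometric1 and trigonometric_gradient1 implement the same sum of squares:

    % Central-difference check of the gradient at the suggested start
    % x0 = (1/n, ..., 1/n), with n = 4.
    n = 4;
    x0 = ones(n, 1) / n;
    g = trigonometric_gradient1(x0);
    h = 1e-6;
    for j = 1:n
      e = zeros(n, 1);
      e(j) = h;
      fd = (trigonometric1(x0 + e) - trigonometric1(x0 - e)) / (2*h);
      assert(abs(g(j) - fd) < 1e-4);
    end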
+ % n = length(x); H = zeros(n,n); diff --git a/optimization/test_functions/wood.m b/optimization/test_functions/wood.m index 3e11a40..eb8b2e7 100644 --- a/optimization/test_functions/wood.m +++ b/optimization/test_functions/wood.m @@ -1,9 +1,9 @@ function f = wood(x1, x2, x3, x4) - ## - ## The Wood function. See Dennis & Schnabel, Appendix B, problem #5. - ## This function has a global minimum at x=(1,1,1,1) with f(x) == 0. - ## The suggested starting point is x0=(-3, -1, -3, -1). - ## + % + % The Wood function. See Dennis & Schnabel, Appendix B, problem #5. + % This function has a global minimum at x=(1,1,1,1) with f(x) == 0. + % The suggested starting point is x0=(-3, -1, -3, -1). + % f = 100*(x1^2 - x2)^2 + (x1 - 1)^2 + (x3 - 1)^2; f = f + 90*(x3^2 - x4)^2; f = f + 10.1*((x2 - 1)^2 + (x4 - 1)^2); diff --git a/optimization/test_functions/wood1.m b/optimization/test_functions/wood1.m index c34de26..c0bfa20 100644 --- a/optimization/test_functions/wood1.m +++ b/optimization/test_functions/wood1.m @@ -1,9 +1,9 @@ function f = wood1(x) - ## - ## A version of the Wood function which takes a column 4-vector - ## instead of four distinct arguments. See wood.m for more - ## information. - ## + % + % A version of the Wood function which takes a column 4-vector + % instead of four distinct arguments. See wood.m for more + % information. + % if (length(x) == 4) f = wood(x(1), x(2), x(3), x(4)); else diff --git a/optimization/test_functions/wood_gradient.m b/optimization/test_functions/wood_gradient.m index eb9f3e6..adf37c2 100644 --- a/optimization/test_functions/wood_gradient.m +++ b/optimization/test_functions/wood_gradient.m @@ -1,8 +1,8 @@ function g = wood_gradient(x1, x2, x3, x4) - ## - ## The gradient of the Wood function. See wood.m for more - ## information. - ## + % + % The gradient of the Wood function. See wood.m for more + % information. + % f_x1 = 400*(x1^2 - x2)*x1 + 2*x1 - 2; f_x2 = -200*x1^2 + 220.2*x2 + 19.8*x4 - 40; f_x3 = 360*(x3^2 - x4)*x3 + 2*x3 - 2; diff --git a/optimization/test_functions/wood_gradient1.m b/optimization/test_functions/wood_gradient1.m index eabccc9..bfe86f3 100644 --- a/optimization/test_functions/wood_gradient1.m +++ b/optimization/test_functions/wood_gradient1.m @@ -1,9 +1,9 @@ function g = wood_gradient1(x) - ## - ## A version of the wood_gradient() function which takes a column - ## 4-vector instead of four distinct arguments. See wood_gradient.m - ## for more information. - ## + % + % A version of the wood_gradient() function which takes a column + % 4-vector instead of four distinct arguments. See wood_gradient.m + % for more information. + % if (length(x) == 4) g = wood_gradient(x(1), x(2), x(3), x(4)); else diff --git a/optimization/test_functions/wood_hessian.m b/optimization/test_functions/wood_hessian.m index f8e38c4..e71f180 100644 --- a/optimization/test_functions/wood_hessian.m +++ b/optimization/test_functions/wood_hessian.m @@ -1,8 +1,8 @@ function H = wood_hessian(x1, x2, x3, x4) - ## - ## The Hessian of the Wood function. See wood.m for more - ## information. - ## + % + % The Hessian of the Wood function. See wood.m for more + % information. 
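A numerical check of the Wood minimizer; the loose gradient tolerance allows for the decimal coefficients (10.1, 19.8, 220.2) not being binary-exact:

    % The Wood function's global minimum at (1, 1, 1, 1).
    assert(wood(1, 1, 1, 1) == 0);
    assert(norm(wood_gradient1(ones(4, 1))) < 1e-12);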
+ % H = zeros(4,4); H(1,1) = 1200*x(1)^2 - 400*x(2) + 2; H(1,2) = -400*x(1); diff --git a/optimization/test_functions/wood_hessian1.m b/optimization/test_functions/wood_hessian1.m index 106cb40..4af006a 100644 --- a/optimization/test_functions/wood_hessian1.m +++ b/optimization/test_functions/wood_hessian1.m @@ -1,9 +1,9 @@ function H = wood_hessian1(x) - ## - ## A version of the wood_hessian() function which takes a column - ## 4-vector instead of four distinct arguments. See wood_hessian.m - ## for more information. - ## + % + % A version of the wood_hessian() function which takes a column + % 4-vector instead of four distinct arguments. See wood_hessian.m + % for more information. + % if (length(x) == 4) H = wood_hessian(x(1), x(2), x(3), x(4)); else diff --git a/partition.m b/partition.m index a084236..99e3892 100644 --- a/partition.m +++ b/partition.m @@ -1,27 +1,27 @@ function [p,delta] = partition(integerN, a, b) - ## Partition the interval [a,b] into integerN subintervals. We do not - ## require that a