from sage.all import *

def is_positive_semidefinite_naive(A):
    r"""
    A naive positive-semidefinite check that tests the eigenvalues for
    nonnegativity. We follow the Sage convention that positive
    (semi)definite matrices must be symmetric or Hermitian.

    SETUP::

        sage: from mjo.ldlt import is_positive_semidefinite_naive

    TESTS:

    The trivial matrix is vacuously positive-semidefinite::

        sage: A = matrix(QQ, 0)
        sage: A
        []
        sage: is_positive_semidefinite_naive(A)
        True

    """
    if A.nrows() == 0:
        return True # vacuously
    return A.is_hermitian() and all( v >= 0 for v in A.eigenvalues() )


def ldlt_naive(A):
    r"""
    Perform a pivoted `LDL^{T}` factorization of the Hermitian
    positive-semidefinite matrix `A`.

    This is a naive, recursive implementation that is inefficient due
    to Python's lack of tail-call optimization. The pivot strategy is
    to choose the largest diagonal entry of the matrix at each step,
    and to permute it into the top-left position. Ultimately this
    results in a factorization `A = PLDL^{T}P^{T}`, where `P` is a
    permutation matrix, `L` is unit-lower-triangular, and `D` is
    diagonal decreasing from top-left to bottom-right.

    ALGORITHM:

    The algorithm is based on the discussion in Golub and Van Loan,
    but with some "typos" fixed.

    OUTPUT:

    A triple `(P,L,D)` such that `A = PLDL^{T}P^{T}` and where,

    * `P` is a permutation matrix,
    * `L` is unit lower-triangular,
    * `D` is a diagonal matrix whose entries are decreasing from
      top-left to bottom-right.

    SETUP::

        sage: from mjo.ldlt import ldlt_naive, is_positive_semidefinite_naive

    EXAMPLES:

    All three factors should be the identity when the original matrix is::

        sage: I = matrix.identity(QQ,4)
        sage: P,L,D = ldlt_naive(I)
        sage: P == I and L == I and D == I
        True

    TESTS:

    Ensure that a "random" positive-semidefinite matrix is factored
    correctly::

        sage: set_random_seed()
        sage: n = ZZ.random_element(5)
        sage: A = matrix.random(QQ, n)
        sage: A = A*A.transpose()
        sage: is_positive_semidefinite_naive(A)
        True
        sage: P,L,D = ldlt_naive(A)
        sage: A == P*L*D*L.transpose()*P.transpose()
        True

    """
    n = A.nrows()

    # Use the fraction field of the given matrix so that division will work
    # when (for example) our matrix consists of integer entries.
    ring = A.base_ring().fraction_field()

    if n == 0 or n == 1:
        # We can get n == 0 if someone feeds us a trivial matrix.
        P = matrix.identity(ring, n)
        L = matrix.identity(ring, n)
        D = A
        return (P,L,D)

    A1 = A.change_ring(ring)
    diags = A1.diagonal()
    s = diags.index(max(diags))
    P1 = copy(A1.matrix_space().identity_matrix())
    P1.swap_rows(0,s)
    A1 = P1.T * A1 * P1
    alpha1 = A1[0,0]

    # Golub and Van Loan mention in passing what to do here. This is
    # only sensible if the matrix is positive-semidefinite, because we
    # are assuming that we can set everything else to zero as soon as
    # we hit the first on-diagonal zero.
    if alpha1 == 0:
        P = A1.matrix_space().identity_matrix()
        L = P
        D = A1.matrix_space().zero()
        return (P,L,D)

    v1 = A1[1:n,0]
    A2 = A1[1:,1:]

    P2, L2, D2 = ldlt_naive(A2 - (v1*v1.transpose())/alpha1)

    P1 = P1*block_matrix(2,2, [[ZZ(1), ZZ(0)],
                               [0*v1, P2]])
    L1 = block_matrix(2,2, [[ZZ(1), ZZ(0)],
                            [P2.transpose()*v1/alpha1, L2]])
    D1 = block_matrix(2,2, [[alpha1, ZZ(0)],
                            [0*v1, D2]])

    return (P1,L1,D1)
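
# An illustrative sanity check (an addition for exposition; it is not
# part of the original test suite, and the helper name and matrix are
# arbitrary choices): run ``ldlt_naive`` on a small, hand-picked
# positive-semidefinite matrix and verify the defining identity.
def _ldlt_naive_example():
    # A = B*B^T is positive-semidefinite by construction.
    B = matrix(QQ, [[2, 0],
                    [1, 1]])
    A = B*B.transpose()        # [[4, 2], [2, 2]]
    P, L, D = ldlt_naive(A)
    assert A == P*L*D*L.transpose()*P.transpose()
    # The largest-diagonal pivot strategy leaves D decreasing.
    assert D[0,0] >= D[1,1]
    return (P, L, D)
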
def ldlt_fast(A):
    r"""
    Perform a fast, pivoted `LDL^{T}` factorization of the Hermitian
    positive-semidefinite matrix `A`.

    This function is much faster than ``ldlt_naive`` because the
    tail-recursion has been unrolled into a loop.
    """
    ring = A.base_ring().fraction_field()
    A = A.change_ring(ring)

    # Keep track of the permutations in a vector rather than in a
    # matrix, for efficiency.
    n = A.nrows()
    p = list(range(n))

    for k in range(n):
        # We need to loop once for every diagonal entry in the
        # matrix. So, as many times as it has rows/columns. At each
        # step, we obtain the permutation needed to put things in the
        # right place, then the "next" entry (alpha) of D, and finally
        # another column of L.
        diags = A.diagonal()[k:n]
        alpha = max(diags)

        # We're working *within* the matrix ``A``, so every index is
        # offset by k. For example: after the second step, we should
        # only be looking at the lower 3-by-3 block of a 5-by-5 matrix.
        s = k + diags.index(alpha)

        # Move the largest diagonal element up into the top-left corner
        # of the block we're working on (the one starting from index
        # (k,k)). Presumably this is faster than hitting the thing with
        # a permutation matrix.
        #
        # Since "L" is stored in the lower-left "half" of "A", it's a
        # good thing that we need to permute "L", too. This is due to
        # how P2.T appears in the recursive algorithm applied to the
        # "current" column of L. There, P2.T is computed recursively,
        # as 1 x P3.T, and P3.T = 1 x P4.T, etc., from the bottom up.
        # All are eventually applied to "v" in order. Here we're
        # working from the top down, and rather than keep track of
        # what permutations we need to perform, we just perform them
        # as we go along. No recursion needed.
        A.swap_columns(k,s)
        A.swap_rows(k,s)

        # Update the permutation "matrix" with the swap we just did.
        p_k = p[k]
        p[k] = p[s]
        p[s] = p_k

        # Now the largest diagonal is in the top-left corner of the
        # block below and to the right of index (k,k). When alpha is
        # zero, we can just leave the rest of the D/L entries
        # zero... which is exactly how they start out.
        if alpha != 0:
            # Update the "next" block of A that we'll work on during
            # the following iteration. I think it's faster to get the
            # entries of a row than a column here?
            for i in range(n-k-1):
                for j in range(i+1):
                    A[k+1+j,k+1+i] = ( A[k+1+j,k+1+i] -
                                       A[k,k+1+j]*A[k,k+1+i]/alpha )
                    A[k+1+i,k+1+j] = A[k+1+j,k+1+i] # keep it symmetric!

            for i in range(n-k-1):
                # Store the "new" (kth) column of L, being sure to set
                # the lower-left "half" from the upper-right "half".
                A[k+i+1,k] = A[k,k+1+i]/alpha

    MS = A.matrix_space()
    P = MS.matrix(lambda i,j: p[j] == i)
    D = MS.diagonal_matrix(A.diagonal())

    for i in range(n):
        A[i,i] = 1
        for j in range(i+1,n):
            A[i,j] = 0

    return P,A,D
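
# Another illustrative check (again not from the original test suite;
# the helper name and matrix are arbitrary choices): ``ldlt_fast``
# unrolls the same largest-diagonal pivot strategy as ``ldlt_naive``,
# so the two should agree on this small positive-definite example.
def _ldlt_fast_example():
    A = matrix(QQ, [[2, 1, 1],
                    [1, 2, 1],
                    [1, 1, 2]])
    P1, L1, D1 = ldlt_naive(A)
    P2, L2, D2 = ldlt_fast(A)
    assert (P1, L1, D1) == (P2, L2, D2)
    assert A == P2*L2*D2*L2.transpose()*P2.transpose()
    return (P2, L2, D2)
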
def block_ldlt_naive(A, check_hermitian=False):
    r"""
    Perform a block-`LDL^{T}` factorization of the Hermitian
    matrix `A`.

    This is a naive, recursive implementation akin to
    ``ldlt_naive()``, where the pivots (and resulting diagonals) are
    either `1 \times 1` or `2 \times 2` blocks. The pivots are chosen
    using the Bunch-Kaufman scheme that is both fast and numerically
    stable.

    OUTPUT:

    A triple `(P,L,D)` such that `A = PLDL^{T}P^{T}` and where,

    * `P` is a permutation matrix,
    * `L` is unit lower-triangular,
    * `D` is a block-diagonal matrix whose blocks are of size
      one or two.

    """
    n = A.nrows()

    # Use the fraction field of the given matrix so that division will work
    # when (for example) our matrix consists of integer entries.
    ring = A.base_ring().fraction_field()

    if n == 0 or n == 1:
        # We can get n == 0 if someone feeds us a trivial matrix. (The
        # 2x2 base case is handled further below, where a 2x2 pivot
        # can leave behind an empty Schur complement.)
        P = matrix.identity(ring, n)
        L = matrix.identity(ring, n)
        D = A
        return (P,L,D)

    alpha = (1 + ZZ(17).sqrt()) * ~ZZ(8)
    A1 = A.change_ring(ring)

    # Bunch-Kaufman step 1, Higham step "zero." We use Higham's
    # "omega" notation instead of Bunch-Kaufman's "lambda" because
    # lambda means other things in the same context.
    column_1_subdiag = [ a_i1.abs() for a_i1 in A1[1:,0].list() ]
    omega_1 = max(column_1_subdiag)

    if omega_1 == 0:
        # "There's nothing to do at this step of the algorithm,"
        # which means that our matrix looks like,
        #
        #   [ a 0 ]
        #   [ 0 B ]
        #
        # We could still do a pivot_one_by_one() here, but it would
        # pointlessly subtract a bunch of zeros and divide by the
        # pivot. Note that the top-left entry of "D" must be "a"
        # itself, not 1, for A == P*L*D*L^T*P^T to hold.
        B = A1[1:,1:]
        one = matrix(ring, 1, 1, [1])
        a = matrix(ring, 1, 1, [A1[0,0]])
        P2, L2, D2 = block_ldlt_naive(B)
        P1 = block_diagonal_matrix(one, P2)
        L1 = block_diagonal_matrix(one, L2)
        D1 = block_diagonal_matrix(a, D2)
        return (P1,L1,D1)

    def pivot_one_by_one(M, c=None):
        # Perform a one-by-one pivot on "M," swapping rows/columns "0"
        # and "c". If "c" is None, no swap is performed.
        if c is not None:
            P1 = copy(M.matrix_space().identity_matrix())
            P1.swap_rows(0,c)
            M = P1.T * M * P1

        # The top-left entry is now our 1x1 pivot.
        C = M[1:n,0]
        B = M[1:,1:]

        P2, L2, D2 = block_ldlt_naive(B - (C*C.transpose())/M[0,0])

        if c is None:
            P1 = block_matrix(2,2, [[ZZ(1), ZZ(0)],
                                    [0*C, P2]])
        else:
            P1 = P1*block_matrix(2,2, [[ZZ(1), ZZ(0)],
                                       [0*C, P2]])

        L1 = block_matrix(2,2, [[ZZ(1), ZZ(0)],
                                [P2.transpose()*C/M[0,0], L2]])
        D1 = block_matrix(2,2, [[M[0,0], ZZ(0)],
                                [0*C, D2]])

        return (P1,L1,D1)

    if A1[0,0].abs() > alpha*omega_1:
        return pivot_one_by_one(A1)

    r = 1 + column_1_subdiag.index(omega_1)

    # If the matrix is Hermitian, we need only look at the above-
    # diagonal entries to find the off-diagonal of maximal magnitude.
    omega_r = max( a_rj.abs() for a_rj in A1[:r,r].list() )

    if A1[0,0].abs()*omega_r >= alpha*(omega_1**2):
        return pivot_one_by_one(A1)

    if A1[r,r].abs() > alpha*omega_r:
        # Higham step (3): another 1x1 pivot, but this time swapping
        # indices 0 and r.
        return pivot_one_by_one(A1,r)

    # Higham step (4): if we made it here, we have to do a 2x2 pivot.
    P1 = copy(A1.matrix_space().identity_matrix())
    P1.swap_rows(1,r)
    A1 = P1.T * A1 * P1

    # The top-left 2x2 submatrix is now our pivot.
    E = A1[:2,:2]
    C = A1[2:n,0:2]
    B = A1[2:,2:]

    if B.nrows() == 0:
        # We have a two-by-two matrix that we can do nothing
        # useful with.
        P = matrix.identity(ring, n)
        L = matrix.identity(ring, n)
        D = A1
        return (P,L,D)

    P2, L2, D2 = block_ldlt_naive(B - (C*E.inverse()*C.transpose()))

    P1 = P1*block_matrix(2,2, [[ZZ(1), ZZ(0)],
                               [0*C, P2]])
    L1 = block_matrix(2,2, [[ZZ(1), ZZ(0)],
                            [P2.transpose()*C*E.inverse(), L2]])
    D1 = block_diagonal_matrix(E,D2)

    return (P1,L1,D1)
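
# An illustrative check for the naive block factorization (not from
# the original test suite; the helper name is an arbitrary choice):
# the indefinite matrix [[0,1],[1,0]] has no standard LDL^T
# factorization because its only possible pivots are zero, but it
# survives intact as a single 2x2 block of "D".
def _block_ldlt_naive_example():
    A = matrix(QQ, [[0, 1],
                    [1, 0]])
    P, L, D = block_ldlt_naive(A)
    assert P == matrix.identity(QQ, 2)
    assert L == matrix.identity(QQ, 2)
    assert D == A
    return (P, L, D)
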
def block_ldlt(A):
    r"""
    Perform a block-`LDL^{T}` factorization of the Hermitian
    matrix `A`.

    The standard `LDL^{T}` factorization of a positive-definite matrix
    `A` factors it as `A = LDL^{T}` where `L` is unit-lower-triangular
    and `D` is diagonal. If one allows row/column swaps via a
    permutation matrix `P`, then this factorization can be extended to
    some positive-semidefinite matrices `A` via the factorization
    `P^{T}AP = LDL^{T}` that places the zeros at the bottom of `D` to
    avoid division by zero. These factorizations extend easily to
    complex Hermitian matrices when one replaces the transpose by the
    conjugate-transpose.

    However, we can go one step further. If, in addition, we allow `D`
    to potentially contain `2 \times 2` blocks on its diagonal, then
    every real or complex Hermitian matrix `A` can be factored as `A =
    PLDL^{*}P^{T}`. When the row/column swaps are made intelligently,
    this process is numerically stable over inexact rings like
    ``RDF``. Bunch and Kaufman describe such a "pivot" scheme that is
    suitable for the solution of Hermitian systems, and that is how we
    choose our row and column swaps.

    OUTPUT:

    If the input matrix is Hermitian, we return a triple `(P,L,D)`
    such that `A = PLDL^{*}P^{T}` and

    * `P` is a permutation matrix,
    * `L` is unit lower-triangular,
    * `D` is a block-diagonal matrix whose blocks are of size
      one or two.

    If the input matrix is not Hermitian, the output from this
    function is undefined.

    EXAMPLES:

    This three-by-three real symmetric matrix has one positive, one
    negative, and one zero eigenvalue -- so it is not any flavor of
    (semi)definite, yet we can still factor it::

        sage: A = matrix(QQ, [[0, 1, 0],
        ....:                 [1, 1, 2],
        ....:                 [0, 2, 0]])
        sage: P,L,D = block_ldlt(A)
        sage: P
        [0 0 1]
        [1 0 0]
        [0 1 0]
        sage: L
        [  1   0   0]
        [  2   1   0]
        [  1 1/2   1]
        sage: D
        [ 1| 0| 0]
        [--+--+--]
        [ 0|-4| 0]
        [--+--+--]
        [ 0| 0| 0]
        sage: P.T*A*P == L*D*L.T
        True

    This two-by-two matrix has no standard factorization, but it
    constitutes its own block-factorization::

        sage: A = matrix(QQ, [ [0,1],
        ....:                  [1,0] ])
        sage: block_ldlt(A)
        (
        [1 0]  [1 0]  [0 1]
        [0 1], [0 1], [1 0]
        )

    The same is true of the following complex Hermitian matrix::

        sage: A = matrix(QQbar, [ [ 0,I],
        ....:                     [-I,0] ])
        sage: block_ldlt(A)
        (
        [1 0]  [1 0]  [ 0 I]
        [0 1], [0 1], [-I 0]
        )

    TESTS:

    All three factors should be the identity when the original matrix is::

        sage: set_random_seed()
        sage: n = ZZ.random_element(6)
        sage: I = matrix.identity(QQ,n)
        sage: P,L,D = block_ldlt(I)
        sage: P == I and L == I and D == I
        True

    Ensure that a "random" real symmetric matrix is factored
    correctly::

        sage: set_random_seed()
        sage: n = ZZ.random_element(6)
        sage: A = matrix.random(QQ, n)
        sage: A = A + A.transpose()
        sage: P,L,D = block_ldlt(A)
        sage: A == P*L*D*L.transpose()*P.transpose()
        True

    Ensure that a "random" complex Hermitian matrix is factored
    correctly::

        sage: set_random_seed()
        sage: n = ZZ.random_element(6)
        sage: F = NumberField(x^2 +1, 'I')
        sage: A = matrix.random(F, n)
        sage: A = A + A.conjugate_transpose()
        sage: P,L,D = block_ldlt(A)
        sage: A == P*L*D*L.conjugate_transpose()*P.transpose()
        True

    Ensure that a "random" complex positive-semidefinite matrix is
    factored correctly and that the resulting block-diagonal matrix is
    in fact diagonal::

        sage: set_random_seed()
        sage: n = ZZ.random_element(6)
        sage: F = NumberField(x^2 +1, 'I')
        sage: A = matrix.random(F, n)
        sage: A = A*A.conjugate_transpose()
        sage: P,L,D = block_ldlt(A)
        sage: A == P*L*D*L.conjugate_transpose()*P.transpose()
        True
        sage: diagonal_matrix(D.diagonal()) == D
        True

    The factorization should be a no-op on diagonal matrices::

        sage: set_random_seed()
        sage: n = ZZ.random_element(6)
        sage: A = matrix.diagonal(random_vector(QQ, n))
        sage: I = matrix.identity(QQ,n)
        sage: P,L,D = block_ldlt(A)
        sage: P == I and L == I and A == D
        True

    """
    # We have to make at least one copy of the input matrix so that we
    # can change the base ring to its fraction field. Both "L" and the
    # intermediate Schur complements will potentially have entries in
    # the fraction field. However, we don't need to make *two* copies.
    # We can't store the entries of "D" and "L" in the same matrix if
    # "D" will contain any 2x2 blocks; but we can still store the
    # entries of "L" in the copy of "A" that we're going to make.
    # Contrast this with the non-block LDL^T factorization where the
    # entries of both "L" and "D" overwrite the lower-left half of "A".
    #
    # This grants us an additional speedup, since we don't have to
    # permute the rows/columns of "L" *and* "A" at each iteration.
    ring = A.base_ring().fraction_field()
    A = A.change_ring(ring)
    MS = A.matrix_space()

    # The magic constant used by Bunch-Kaufman
    alpha = (1 + ZZ(17).sqrt()) * ~ZZ(8)

    # Keep track of the permutations and diagonal blocks in a vector
    # rather than in a matrix, for efficiency.
    n = A.nrows()
    p = list(range(n))
    d = []

    def swap_rows_columns(M, k, s):
        r"""
        Swap rows/columns ``k`` and ``s`` of the matrix ``M``, and update
        the list ``p`` accordingly.
        """
        if s > k:
            # s == k would swap row/column k with itself, and we don't
            # actually want to perform the identity permutation. If
            # you work out the recursive factorization by hand, you'll
            # notice that the rows/columns of "L" need to be permuted
            # as well. A nice side effect of storing "L" within "A"
            # itself is that we can skip that step. The first column
            # of "L" is hit by all of the transpositions in
            # succession, and the second column is hit by all but the
            # first transposition, and so on.
            M.swap_columns(k,s)
            M.swap_rows(k,s)

            p_k = p[k]
            p[k] = p[s]
            p[s] = p_k

        # No return value; we're only interested in the "side effects"
        # of modifying the matrix M (by reference) and the permutation
        # list p (which is in scope when this function is defined).
        return

    def pivot1x1(M, k, s):
        r"""
        Perform a 1x1 pivot swapping rows/columns `k` and `s >= k`.
        Relies on the fact that matrices are passed by reference, since
        for performance reasons this routine should overwrite its
        argument. Updates the local variables ``p`` and ``d`` as well.
        """
        swap_rows_columns(M,k,s)

        # Now the pivot is in the (k,k)th position.
        d.append( matrix(ring, 1, [[A[k,k]]]) )

        # Compute the Schur complement that we'll work on during
        # the following iteration, and store it back in the lower-
        # right-hand corner of "A".
        for i in range(n-k-1):
            for j in range(i+1):
                A[k+1+i,k+1+j] = ( A[k+1+i,k+1+j] -
                                   A[k+1+i,k]*A[k,k+1+j]/A[k,k] )
                A[k+1+j,k+1+i] = A[k+1+i,k+1+j].conjugate() # stay hermitian!

        for i in range(n-k-1):
            # Store the new (kth) column of "L" within the lower-
            # left-hand corner of "A".
            A[k+i+1,k] /= A[k,k]

        # No return value, only the desired side effects of updating
        # p, d, and A.
        return
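
    # For reference, each pivot1x1() call above realizes one step of
    # the standard block decomposition
    #
    #   [ a  b* ]   [  1   0 ] [ a  0 ] [ 1  b*/a ]
    #   [ b  B  ] = [ b/a  I ] [ 0  S ] [ 0    I  ]
    #
    # where "a" is the (k,k) pivot, "b" is the column below it, and
    # S = B - b*b^{*}/a is the Schur complement that gets stored back
    # into the lower-right corner of "A"; the column b/a becomes the
    # next column of "L". The 2x2 pivots in the main loop below use
    # the analogous identity with "a" replaced by the 2x2 block "E".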
    k = 0
    while k < n:
        # At each step, we're considering the (n-k)-by-(n-k) submatrix
        # contained in the lower-right half of "A", because that's
        # where we're storing the next iterate. So our indices are
        # always "k" greater than those of Higham or B&K. Note that
        # ``n == 0`` is handled by skipping this loop entirely.

        if k == (n-1):
            # Handle this trivial case manually, since otherwise the
            # algorithm's references to e.g. the "subdiagonal" are
            # meaningless. The corresponding entry of "L" will be
            # fixed later (since it's an on-diagonal element, it gets
            # set to one eventually).
            d.append( matrix(ring, 1, [[A[k,k]]]) )
            k += 1
            continue

        # Find the largest subdiagonal entry (in magnitude) in the
        # kth column. This occurs prior to Step (1) in Higham,
        # but is part of Step (1) in Bunch and Kaufman. We adopt
        # Higham's "omega" notation instead of B&K's "lambda"
        # because "lambda" can lead to some confusion.
        column_1_subdiag = [ a_ki.abs() for a_ki in A[k+1:,k].list() ]
        omega_1 = max(column_1_subdiag)

        if omega_1 == 0:
            # In this case, our matrix looks like
            #
            #   [ a 0 ]
            #   [ 0 B ]
            #
            # and we can simply skip to the next step after recording
            # the 1x1 pivot "a" in the top-left position. The entry "a"
            # will be adjusted to "1" later on to ensure that "L" is
            # (block) unit-lower-triangular.
            d.append( matrix(ring, 1, [[A[k,k]]]) )
            k += 1
            continue

        if A[k,k].abs() > alpha*omega_1:
            # This is the first case in Higham's Step (1), and B&K's
            # Step (2). Note that we have skipped the part of B&K's
            # Step (1) where we determine "r", since "r" is not yet
            # needed and we may waste some time computing it
            # otherwise. We are performing a 1x1 pivot, but the
            # rows/columns are already where we want them, so nothing
            # needs to be permuted.
            pivot1x1(A,k,k)
            k += 1
            continue

        # Now back to Step (1) of Higham, where we find the index "r"
        # that corresponds to omega_1. This is the "else" branch of
        # Higham's Step (1).
        r = k + 1 + column_1_subdiag.index(omega_1)

        # Continuing the "else" branch of Higham's Step (1), and onto
        # B&K's Step (3) where we find the largest off-diagonal entry
        # (in magnitude) in column "r". Since the matrix is Hermitian,
        # we need only look at the above-diagonal entries to find the
        # off-diagonal of maximal magnitude.
        omega_r = max( a_rj.abs() for a_rj in A[r,k:r].list() )

        if A[k,k].abs()*omega_r >= alpha*(omega_1**2):
            # Step (2) in Higham or Step (4) in B&K.
            pivot1x1(A,k,k)
            k += 1
            continue

        if A[r,r].abs() > alpha*omega_r:
            # This is Step (3) in Higham or Step (5) in B&K. Still a
            # 1x1 pivot, but this time we need to swap rows/columns k
            # and r.
            pivot1x1(A,k,r)
            k += 1
            continue

        # If we've made it this far, we're at Step (4) in Higham or
        # Step (6) in B&K, where we perform a 2x2 pivot.
        swap_rows_columns(A,k+1,r)

        # The top-left 2x2 submatrix (starting at position (k,k)) is
        # now our pivot.
        E = A[k:k+2,k:k+2]
        d.append(E)

        C = A[k+2:n,k:k+2]
        B = A[k+2:,k+2:]

        # We don't actually need the inverse of E; what we really need
        # is C*E.inverse(), and that can be found by setting
        #
        #   X = C*E.inverse()   <====>   X*E = C.
        #
        # Then "X" can be found easily by solving a system. Note: I
        # do not actually know that Sage solves the system more
        # intelligently, but this is still The Right Thing To Do.
        CE_inverse = E.solve_left(C)

        schur_complement = B - (CE_inverse*C.conjugate_transpose())

        # Compute the Schur complement that we'll work on during
        # the following iteration, and store it back in the lower-
        # right-hand corner of "A".
        for i in range(n-k-2):
            for j in range(i+1):
                A[k+2+i,k+2+j] = schur_complement[i,j]
                A[k+2+j,k+2+i] = schur_complement[j,i]

        # The on- and above-diagonal entries of "L" will be fixed
        # later, so we only need to worry about the lower-left entry
        # of the 2x2 identity matrix that belongs at the top of the
        # new column of "L".
        A[k+1,k] = 0

        for i in range(n-k-2):
            for j in range(2):
                # Store the new (kth and (k+1)st) columns of "L"
                # within the lower-left-hand corner of "A".
                A[k+i+2,k+j] = CE_inverse[i,j]

        k += 2

    P = MS.matrix(lambda i,j: p[j] == i)

    # Warning: when n == 0, this works, but returns a matrix
    # whose (nonexistent) entries are in ZZ rather than in
    # the base ring of P and L.
    D = block_diagonal_matrix(d)

    # Overwrite the diagonal and upper-right half of "A",
    # since we're about to return it as the unit-lower-
    # triangular "L".
    for i in range(n):
        A[i,i] = 1
        for j in range(i+1,n):
            A[i,j] = 0

    return (P,A,D)
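
# A final end-to-end sketch (an illustrative addition that mirrors the
# first doctest above; the helper name is an arbitrary choice): factor
# an indefinite symmetric matrix with ``block_ldlt`` and verify both
# forms of the defining identity.
def _block_ldlt_example():
    A = matrix(QQ, [[0, 1, 0],
                    [1, 1, 2],
                    [0, 2, 0]])
    P, L, D = block_ldlt(A)
    assert P.transpose()*A*P == L*D*L.transpose()
    assert A == P*L*D*L.transpose()*P.transpose()
    return (P, L, D)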