diff --git a/mjo/ldlt.py b/mjo/ldlt.py
index d113df3..6db747f 100644
--- a/mjo/ldlt.py
+++ b/mjo/ldlt.py
@@ -25,6 +25,7 @@ def is_positive_semidefinite_naive(A):
         return True # vacuously
     return A.is_hermitian() and all( v >= 0 for v in A.eigenvalues() )
 
+
 def ldlt_naive(A):
     r"""
     Perform a pivoted `LDL^{T}` factorization of the Hermitian
@@ -135,18 +136,12 @@ def ldlt_fast(A):
     This function is much faster than ``ldlt_naive`` because the
     tail-recursion has been unrolled into a loop.
     """
-    n = A.nrows()
     ring = A.base_ring().fraction_field()
     A = A.change_ring(ring)
 
-    # Don't try to store the results in the lower-left-hand corner of
-    # "A" itself; there lies madness.
-    L = copy(A.matrix_space().identity_matrix())
-    D = copy(A.matrix_space().zero())
-
     # Keep track of the permutations in a vector rather than in a
     # matrix, for efficiency.
+    n = A.nrows()
     p = list(range(n))
 
     for k in range(n):
@@ -167,48 +162,425 @@ def ldlt_fast(A):
         # of the block we're working on (the one starting from index k,k).
         # Presumably this is faster than hitting the thing with a
         # permutation matrix.
+        #
+        # Since "L" is stored in the lower-left "half" of "A", it's a
+        # good thing that we need to permute "L", too. This is due to
+        # how P2.T appears in the recursive algorithm applied to the
+        # "current" column of L. There, P2.T is computed recursively,
+        # as 1 x P3.T, and P3.T = 1 x P4.T, etc., from the bottom up.
+        # All are eventually applied to "v" in order. Here we're
+        # working from the top down, and rather than keep track of
+        # what permutations we need to perform, we just perform them
+        # as we go along. No recursion needed.
         A.swap_columns(k,s)
         A.swap_rows(k,s)
 
-        # Have to do L, too, to keep track of the "P2.T" (which is 1 x
-        # P3.T which is 1 x P4 T)... in the recursive
-        # algorithm. There, we compute P2^T from the bottom up. Here,
-        # we apply the permutations one at a time, essentially
-        # building them top-down (but just applying them instead of
-        # building them.
-        L.swap_columns(k,s)
-        L.swap_rows(k,s)
-
-        # Update the permutation "matrix" with the next swap.
+        # Update the permutation "matrix" with the swap we just did.
         p_k = p[k]
         p[k] = p[s]
         p[s] = p_k
 
-        # Now the largest diagonal is in the top-left corner of
-        # the block below and to the right of index k,k....
-        # Note: same as ``pivot``.
-        D[k,k] = alpha
-
-        # When alpha is zero, we can just leave the rest of the D/L entries
+        # Now the largest diagonal is in the top-left corner of the
+        # block below and to the right of index k,k. When alpha is
+        # zero, we can just leave the rest of the D/L entries
         # zero... which is exactly how they start out.
         if alpha != 0:
             # Update the "next" block of A that we'll work on during
             # the following iteration. I think it's faster to get the
             # entries of a row than a column here?
-            v = vector(ring, A[k,k+1:n].list())
-            b = v.column()*v.row()/alpha
             for i in range(n-k-1):
                 for j in range(i+1):
-                    # Something goes wrong if I try to access the kth row/column
-                    # of A to save the intermediate "b" here...
-                    A[k+1+i,k+1+j] = A[k+1+i,k+1+j] - b[i,j]
-                    A[k+1+j,k+1+i] = A[k+1+i,k+1+j] # keep it symmetric!
+                    A[k+1+j,k+1+i] = A[k+1+j,k+1+i] - A[k,k+1+j]*A[k,k+1+i]/alpha
+                    A[k+1+i,k+1+j] = A[k+1+j,k+1+i] # keep it symmetric!
 
-            # Store the "new" (kth) column of L.
             for i in range(n-k-1):
-                L[k+i+1,k] = v[i]/alpha
+                # Store the "new" (kth) column of L, being sure to set
+                # the lower-left "half" from the upper-right "half".
+                A[k+i+1,k] = A[k,k+1+i]/alpha
+
+    MS = A.matrix_space()
+    P = MS.matrix(lambda i,j: p[j] == i)
+    D = MS.diagonal_matrix(A.diagonal())
+
+    for i in range(n):
+        A[i,i] = 1
+        for j in range(i+1,n):
+            A[i,j] = 0
+
+    return P,A,D
+
+
+def block_ldlt_naive(A, check_hermitian=False):
+    r"""
+    Perform a block-`LDL^{T}` factorization of the Hermitian
+    matrix `A`.
+
+    This is a naive, recursive implementation akin to
+    ``ldlt_naive()``, where the pivots (and resulting diagonals) are
+    either `1 \times 1` or `2 \times 2` blocks. The pivots are chosen
+    using the Bunch-Kaufman scheme, which is both fast and
+    numerically stable.
+
+    OUTPUT:
+
+    A triple `(P,L,D)` such that `A = PLDL^{T}P^{T}`, where
+
+    * `P` is a permutation matrix,
+    * `L` is unit lower-triangular, and
+    * `D` is a block-diagonal matrix whose blocks are of size
+      one or two.
+
+    """
+    n = A.nrows()
+
+    # Use the fraction field of the given matrix so that division will work
+    # when (for example) our matrix consists of integer entries.
+    ring = A.base_ring().fraction_field()
+
+    if n == 0 or n == 1:
+        # We can get n == 0 if someone feeds us a trivial matrix.
+        # For block-LDLT, n == 1 is a base case.
+        P = matrix.identity(ring, n)
+        L = matrix.identity(ring, n)
+        D = A
+        return (P,L,D)
+
+    alpha = (1 + ZZ(17).sqrt()) * ~ZZ(8)
+    A1 = A.change_ring(ring)
 
-    I = A.matrix_space().identity_matrix()
-    P = matrix.column( I.row(p[j]) for j in range(n) )
+    # Bunch-Kaufman step 1, Higham step "zero." We use Higham's
+    # "omega" notation instead of Bunch-Kaufman's "lambda" because
+    # lambda means other things in the same context.
+    column_1_subdiag = [ a_i1.abs() for a_i1 in A1[1:,0].list() ]
+    omega_1 = max(column_1_subdiag)
+
+    if omega_1 == 0:
+        # "There's nothing to do at this step of the algorithm,"
+        # which means that our matrix looks like,
+        #
+        #   [ a 0 ]
+        #   [ 0 B ]
+        #
+        # We could still do a pivot_one_by_one() here, but it would
+        # pointlessly subtract a bunch of zeros.
+        B = A1[1:,1:]
+        one = matrix(ring, 1, 1, [1])
+        # The 1x1 pivot we record is the (not necessarily unit)
+        # top-left entry "a" itself.
+        a = matrix(ring, 1, 1, [A1[0,0]])
+        P2, L2, D2 = block_ldlt_naive(B)
+        P1 = block_diagonal_matrix(one, P2)
+        L1 = block_diagonal_matrix(one, L2)
+        D1 = block_diagonal_matrix(a, D2)
+        return (P1,L1,D1)
+
+    def pivot_one_by_one(M, c=None):
+        # Perform a 1x1 pivot on "M", swapping rows/columns 0 and "c".
+        # If "c" is None, no swap is performed.
+        if c is not None:
+            P1 = copy(M.matrix_space().identity_matrix())
+            P1.swap_rows(0,c)
+            M = P1.T * M * P1
+
+        # The top-left entry is now our 1x1 pivot.
+        C = M[1:n,0]
+        B = M[1:,1:]
+
+        P2, L2, D2 = block_ldlt_naive(B - (C*C.transpose())/M[0,0])
+
+        if c is None:
+            P1 = block_matrix(2,2, [[ZZ(1), ZZ(0)],
+                                    [0*C, P2]])
+        else:
+            P1 = P1*block_matrix(2,2, [[ZZ(1), ZZ(0)],
+                                       [0*C, P2]])
+
+        L1 = block_matrix(2,2, [[ZZ(1), ZZ(0)],
+                                [P2.transpose()*C/M[0,0], L2]])
+        D1 = block_matrix(2,2, [[M[0,0], ZZ(0)],
+                                [0*C, D2]])
+
+        return (P1,L1,D1)
+
+
+    if A1[0,0].abs() > alpha*omega_1:
+        return pivot_one_by_one(A1)
+
+    r = 1 + column_1_subdiag.index(omega_1)
+
+    # Since the matrix is Hermitian, we need only look at the above-
+    # diagonal entries to find the off-diagonal of maximal magnitude.
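+    # (Each below-diagonal entry a_ir is the conjugate of the
+    # above-diagonal entry a_ri, so the two halves carry the same
+    # magnitudes.)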
+    omega_r = max( a_rj.abs() for a_rj in A1[:r,r].list() )
+
+    if A1[0,0].abs()*omega_r >= alpha*(omega_1**2):
+        return pivot_one_by_one(A1)
+
+    if A1[r,r].abs() > alpha*omega_r:
+        # Higham step (3)
+        # Another 1x1 pivot, but this time swapping indices 0,r.
+        return pivot_one_by_one(A1,r)
+
+    # Higham step (4)
+    # If we made it here, we have to do a 2x2 pivot.
+    P1 = copy(A1.matrix_space().identity_matrix())
+    P1.swap_rows(1,r)
+    A1 = P1.T * A1 * P1
+
+    # The top-left 2x2 submatrix is now our pivot.
+    E = A1[:2,:2]
+    C = A1[2:n,0:2]
+    B = A1[2:,2:]
+
+    if B.nrows() == 0:
+        # We have a two-by-two matrix with which we can do nothing
+        # useful.
+        P = matrix.identity(ring, n)
+        L = matrix.identity(ring, n)
+        D = A1
+        return (P,L,D)
+
+    P2, L2, D2 = block_ldlt_naive(B - (C*E.inverse()*C.transpose()))
+
+    P1 = P1*block_matrix(2,2, [[ZZ(1), ZZ(0)],
+                               [0*C, P2]])
+
+    L1 = block_matrix(2,2, [[ZZ(1), ZZ(0)],
+                            [P2.transpose()*C*E.inverse(), L2]])
+    D1 = block_diagonal_matrix(E,D2)
+
+    return (P1,L1,D1)
 
-    return P,L,D
+
+def block_ldlt(A):
+    r"""
+    Perform a block-`LDL^{T}` factorization of the Hermitian
+    matrix `A`.
+
+    OUTPUT:
+
+    A triple `(P,L,D)` such that `A = PLDL^{T}P^{T}`, where
+
+    * `P` is a permutation matrix,
+    * `L` is unit lower-triangular, and
+    * `D` is a block-diagonal matrix whose blocks are of size
+      one or two.
+    """
+
+    # We have to make at least one copy of the input matrix so that we
+    # can change the base ring to its fraction field. Both "L" and the
+    # intermediate Schur complements will potentially have entries in
+    # the fraction field. However, we don't need to make *two* copies.
+    # We can't store the entries of "D" and "L" in the same matrix if
+    # "D" will contain any 2x2 blocks; but we can still store the
+    # entries of "L" in the copy of "A" that we're going to make.
+    # Contrast this with the non-block LDL^T factorization where the
+    # entries of both "L" and "D" overwrite the lower-left half of "A".
+    #
+    # This grants us an additional speedup, since we don't have to
+    # permute the rows/columns of "L" *and* "A" at each iteration.
+    ring = A.base_ring().fraction_field()
+    A = A.change_ring(ring)
+    MS = A.matrix_space()
+
+    # The magic constant used by Bunch-Kaufman
+    alpha = (1 + ZZ(17).sqrt()) * ~ZZ(8)
+
+    # Keep track of the permutations and diagonal blocks in a vector
+    # rather than in a matrix, for efficiency.
+    n = A.nrows()
+    p = list(range(n))
+    d = []
+
+    def swap_rows_columns(M, k, s):
+        r"""
+        Swap rows/columns ``k`` and ``s`` of the matrix ``M``, and update
+        the list ``p`` accordingly.
+        """
+        if s > k:
+            # s == k would swap row/column k with itself, and we don't
+            # actually want to perform the identity permutation. If
+            # you work out the recursive factorization by hand, you'll
+            # notice that the rows/columns of "L" need to be permuted
+            # as well. A nice side effect of storing "L" within "A"
+            # itself is that we can skip that step. The first column
+            # of "L" is hit by all of the transpositions in
+            # succession, and the second column is hit by all but the
+            # first transposition, and so on.
+            M.swap_columns(k,s)
+            M.swap_rows(k,s)
+
+            p_k = p[k]
+            p[k] = p[s]
+            p[s] = p_k
+
+        # No return value; we're only interested in the "side effects"
+        # of modifying the matrix M (by reference) and the permutation
+        # list p (which is in scope when this function is defined).
+        return
+
+
+    def pivot1x1(M, k, s):
+        r"""
+        Perform a 1x1 pivot swapping rows/columns `k` and `s >= k`.
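+        Afterwards, the pivot element sits in position `(k,k)`.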
+        Relies on the fact that matrices are passed by reference,
+        since for performance reasons this routine should overwrite
+        its argument. Updates the local variables ``p`` and ``d`` as
+        well.
+        """
+        swap_rows_columns(M,k,s)
+
+        # Now the pivot is in the (k,k)th position.
+        d.append( matrix(ring, 1, [[M[k,k]]]) )
+
+        # Compute the Schur complement that we'll work on during
+        # the following iteration, and store it back in the lower-
+        # right-hand corner of "M".
+        for i in range(n-k-1):
+            for j in range(i+1):
+                M[k+1+j,k+1+i] = ( M[k+1+j,k+1+i] -
+                                   M[k,k+1+j]*M[k,k+1+i]/M[k,k] )
+                M[k+1+i,k+1+j] = M[k+1+j,k+1+i] # keep it symmetric!
+
+        for i in range(n-k-1):
+            # Store the new (kth) column of "L" within the lower-
+            # left-hand corner of "M", being sure to set the lower-
+            # left entries from the upper-right ones to avoid
+            # collisions.
+            M[k+i+1,k] = M[k,k+1+i]/M[k,k]
+
+        # No return value, only the desired side effects of updating
+        # p, d, and M.
+        return
+
+    k = 0
+    while k < n:
+        # At each step, we're considering the (n-k)-by-(n-k) submatrix
+        # in the lower-right corner of "A", because that's where we're
+        # storing the next iterate. So our indices are always "k"
+        # greater than those of Higham or B&K. Note that ``n == 0`` is
+        # handled by skipping this loop entirely.
+
+        if k == (n-1):
+            # Handle this trivial case manually, since otherwise the
+            # algorithm's references to (e.g.) the "subdiagonal" are
+            # meaningless.
+            d.append( matrix(ring, 1, [[A[k,k]]]) )
+            k += 1
+            continue
+
+        # Find the largest subdiagonal entry (in magnitude) in the
+        # kth column. This occurs prior to Step (1) in Higham,
+        # but is part of Step (1) in Bunch and Kaufman. We adopt
+        # Higham's "omega" notation instead of B&K's "lambda"
+        # because "lambda" can lead to some confusion. Beware:
+        # the subdiagonals of our matrix are being overwritten!
+        # So we actually use the corresponding row entries instead.
+        column_1_subdiag = [ a_ki.abs() for a_ki in A[k,k+1:].list() ]
+        omega_1 = max(column_1_subdiag)
+
+        if omega_1 == 0:
+            # In this case, our matrix looks like
+            #
+            #   [ a 0 ]
+            #   [ 0 B ]
+            #
+            # and we can simply skip to the next step after recording
+            # the 1x1 pivot "a" in the top-left position.
+            d.append( matrix(ring, 1, [[A[k,k]]]) )
+            k += 1
+            continue
+
+        if A[k,k].abs() > alpha*omega_1:
+            # This is the first case in Higham's Step (1), and B&K's
+            # Step (2). Note that we have skipped the part of B&K's
+            # Step (1) where we determine "r", since "r" is not yet
+            # needed and we may waste some time computing it
+            # otherwise. We are performing a 1x1 pivot, but the
+            # rows/columns are already where we want them, so nothing
+            # needs to be permuted.
+            pivot1x1(A,k,k)
+            k += 1
+            continue
+
+        # Now back to Step (1) of Higham, where we find the index "r"
+        # that corresponds to omega_1. This is the "else" branch of
+        # Higham's Step (1).
+        r = k + 1 + column_1_subdiag.index(omega_1)
+
+        # Continuing the "else" branch of Higham's Step (1), and on to
+        # B&K's Step (3) where we find the largest off-diagonal entry
+        # (in magnitude) in column "r". Since the matrix is Hermitian,
+        # we need only look at the above-diagonal entries to find the
+        # off-diagonal of maximal magnitude. (Beware: the subdiagonal
+        # entries are being overwritten.)
+        omega_r = max( a_rj.abs() for a_rj in A[:r,r].list() )
+
+        if A[k,k].abs()*omega_r >= alpha*(omega_1**2):
+            # Step (2) in Higham or Step (4) in B&K.
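+            # Even though A[k,k] itself may be small, this inequality
+            # ensures that the element growth from a 1x1 pivot on
+            # A[k,k] stays acceptably bounded.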
+            pivot1x1(A,k,k)
+            k += 1
+            continue
+
+        if A[r,r].abs() > alpha*omega_r:
+            # This is Step (3) in Higham or Step (5) in B&K. Still a 1x1
+            # pivot, but this time we need to swap rows/columns k and r.
+            pivot1x1(A,k,r)
+            k += 1
+            continue
+
+        # If we've made it this far, we're at Step (4) in Higham or
+        # Step (6) in B&K, where we perform a 2x2 pivot.
+        swap_rows_columns(A,k+1,r)
+
+        # The top-left 2x2 submatrix (starting at position k,k) is now
+        # our pivot.
+        E = A[k:k+2,k:k+2]
+        d.append(E)
+
+        C = A[k+2:n,k:k+2]
+        B = A[k+2:,k+2:]
+
+        # TODO: don't invert, there are better ways to get the C*E^(-1)
+        # that we need.
+        E_inverse = E.inverse()
+
+        schur_complement = B - (C*E_inverse*C.transpose())
+
+        # Store the Schur complement that we'll work on during the
+        # following iteration back in the lower-right-hand corner
+        # of "A".
+        for i in range(n-k-2):
+            for j in range(i+1):
+                A[k+2+j,k+2+i] = schur_complement[j,i]
+                A[k+2+i,k+2+j] = A[k+2+j,k+2+i] # keep it symmetric!
+
+        # The on- and above-diagonal entries of "L" will be fixed
+        # later, so we only need to worry about the lower-left entry
+        # of the 2x2 identity matrix that belongs at the top of the
+        # new column of "L".
+        A[k+1,k] = 0
+
+        # Hoist this product out of the loop below rather than
+        # recompute it for every entry.
+        C_times_E_inverse = C*E_inverse
+        for i in range(n-k-2):
+            for j in range(2):
+                # Store the new (kth and (k+1)st) columns of "L" within
+                # the lower-left-hand corner of "A", being sure to set
+                # the lower-left entries from the upper-right ones to
+                # avoid collisions.
+                A[k+i+2,k+j] = C_times_E_inverse[i,j]
+
+        k += 2
+
+    MS = A.matrix_space()
+    P = MS.matrix(lambda i,j: p[j] == i)
+
+    # Warning: when n == 0, this works, but returns a matrix
+    # whose (nonexistent) entries are in ZZ rather than in
+    # the base ring of P and L.
+    D = block_diagonal_matrix(d)
+
+    # Overwrite the diagonal and upper-right half of "A",
+    # since we're about to return it as the unit-lower-
+    # triangular "L".
+    for i in range(n):
+        A[i,i] = 1
+        for j in range(i+1,n):
+            A[i,j] = 0
+
+    return (P,A,D)
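
For reference, here is a quick sanity check of the new ``block_ldlt``
from a Sage session (a sketch, assuming the module is importable as
``mjo.ldlt``):

    sage: from mjo.ldlt import block_ldlt
    sage: A = matrix(QQ, [[0,1],[1,0]])  # indefinite; forces a 2x2 pivot
    sage: P,L,D = block_ldlt(A)
    sage: P*L*D*L.transpose()*P.transpose() == A
    True
    sage: D  # a single 2x2 block; no diagonal LDL^T factorization exists
    [0 1]
    [1 0]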