X-Git-Url: http://gitweb.michael.orlitzky.com/?a=blobdiff_plain;f=mjo%2Fldlt.py;h=461dda3062f1de5e1f0d2acac07d9589d28f8ca5;hb=3abfe3dd31271b6863d52b4fe23993b92359bed9;hp=9a6070d1fadf7e66663336cd35af96b8adf2d30e;hpb=c3ce78725469fb9d75d7c1d74b33bf1d65eea9cc;p=sage.d.git

diff --git a/mjo/ldlt.py b/mjo/ldlt.py
index 9a6070d..461dda3 100644
--- a/mjo/ldlt.py
+++ b/mjo/ldlt.py
@@ -353,14 +353,144 @@ def block_ldlt(A):
     Perform a block-`LDL^{T}` factorization of the Hermitian matrix
     `A`.

+    The standard `LDL^{T}` factorization of a positive-definite matrix
+    `A` factors it as `A = LDL^{T}` where `L` is unit-lower-triangular
+    and `D` is diagonal. If one allows row/column swaps via a
+    permutation matrix `P`, then this factorization can be extended to
+    some positive-semidefinite matrices `A` via the factorization
+    `P^{T}AP = LDL^{T}` that places the zeros at the bottom of `D` to
+    avoid division by zero. These factorizations extend easily to
+    complex Hermitian matrices when one replaces the transpose by the
+    conjugate-transpose.
+
+    However, we can go one step further. If, in addition, we allow `D`
+    to potentially contain `2 \times 2` blocks on its diagonal, then
+    every real or complex Hermitian matrix `A` can be factored as `A =
+    PLDL^{*}P^{T}`. When the row/column swaps are made intelligently,
+    this process is numerically stable over inexact rings like ``RDF``.
+    Bunch and Kaufman describe such a "pivot" scheme that is suitable
+    for the solution of Hermitian systems, and that is how we choose
+    our row and column swaps.
+
     OUTPUT:

-    A triple `(P,L,D)` such that `A = PLDL^{T}P^{T}` and where,
+    If the input matrix is Hermitian, we return a triple `(P,L,D)`
+    such that `A = PLDL^{*}P^{T}` and

-    * `P` is a permutation matrix
-    * `L` is unit lower-triangular
+    * `P` is a permutation matrix,
+    * `L` is unit lower-triangular,
     * `D` is a block-diagonal matrix whose blocks are of size
       one or two.
+
+    If the input matrix is not Hermitian, the output from this function
+    is undefined.
+
+    EXAMPLES:
+
+    This three-by-three real symmetric matrix has one positive, one
+    negative, and one zero eigenvalue -- so it is not any flavor of
+    (semi)definite, yet we can still factor it::
+
+        sage: A = matrix(QQ, [[0, 1, 0],
+        ....:                 [1, 1, 2],
+        ....:                 [0, 2, 0]])
+        sage: P,L,D = block_ldlt(A)
+        sage: P
+        [0 0 1]
+        [1 0 0]
+        [0 1 0]
+        sage: L
+        [  1   0   0]
+        [  2   1   0]
+        [  1 1/2   1]
+        sage: D
+        [ 1| 0| 0]
+        [--+--+--]
+        [ 0|-4| 0]
+        [--+--+--]
+        [ 0| 0| 0]
+        sage: P.T*A*P == L*D*L.T
+        True
+
+    This two-by-two matrix has no standard factorization, but it
+    constitutes its own block-factorization::
+
+        sage: A = matrix(QQ, [ [0,1],
+        ....:                  [1,0] ])
+        sage: block_ldlt(A)
+        (
+        [1 0]  [1 0]  [0 1]
+        [0 1], [0 1], [1 0]
+        )
+
+    The same is true of the following complex Hermitian matrix::
+
+        sage: A = matrix(QQbar, [ [ 0,I],
+        ....:                     [-I,0] ])
+        sage: block_ldlt(A)
+        (
+        [1 0]  [1 0]  [ 0  I]
+        [0 1], [0 1], [-I  0]
+        )
+
+    TESTS:
+
+    All three factors should be the identity when the original matrix
+    is the identity::
+
+        sage: set_random_seed()
+        sage: n = ZZ.random_element(6)
+        sage: I = matrix.identity(QQ,n)
+        sage: P,L,D = block_ldlt(I)
+        sage: P == I and L == I and D == I
+        True
+
+    Ensure that a "random" real symmetric matrix is factored correctly::
+
+        sage: set_random_seed()
+        sage: n = ZZ.random_element(6)
+        sage: A = matrix.random(QQ, n)
+        sage: A = A + A.transpose()
+        sage: P,L,D = block_ldlt(A)
+        sage: A == P*L*D*L.transpose()*P.transpose()
+        True
+
+    Ensure that a "random" complex Hermitian matrix is factored correctly::
+
+        sage: set_random_seed()
+        sage: n = ZZ.random_element(6)
+        sage: F = NumberField(x^2 + 1, 'I')
+        sage: A = matrix.random(F, n)
+        sage: A = A + A.conjugate_transpose()
+        sage: P,L,D = block_ldlt(A)
+        sage: A == P*L*D*L.conjugate_transpose()*P.transpose()
+        True
+
+    Ensure that a "random" complex positive-semidefinite matrix is
+    factored correctly and that the resulting block-diagonal matrix is
+    in fact diagonal::
+
+        sage: set_random_seed()
+        sage: n = ZZ.random_element(6)
+        sage: F = NumberField(x^2 + 1, 'I')
+        sage: A = matrix.random(F, n)
+        sage: A = A*A.conjugate_transpose()
+        sage: P,L,D = block_ldlt(A)
+        sage: A == P*L*D*L.conjugate_transpose()*P.transpose()
+        True
+        sage: diagonal_matrix(D.diagonal()) == D
+        True
+
+    The factorization should be a no-op on diagonal matrices::
+
+        sage: set_random_seed()
+        sage: n = ZZ.random_element(6)
+        sage: A = matrix.diagonal(random_vector(QQ, n))
+        sage: I = matrix.identity(QQ,n)
+        sage: P,L,D = block_ldlt(A)
+        sage: P == I and L == I and A == D
+        True
+
     """

     # We have to make at least one copy of the input matrix so that we
@@ -434,16 +564,14 @@ def block_ldlt(A):
         # right-hand corner of "A".
         for i in range(n-k-1):
             for j in range(i+1):
-                A[k+1+j,k+1+i] = ( A[k+1+j,k+1+i] -
-                                   A[k,k+1+j]*A[k,k+1+i]/A[k,k] )
-                A[k+1+i,k+1+j] = A[k+1+j,k+1+i] # keep it symmetric!
+                A[k+1+i,k+1+j] = ( A[k+1+i,k+1+j] -
+                                   A[k+1+i,k]*A[k,k+1+j]/A[k,k] )
+                A[k+1+j,k+1+i] = A[k+1+i,k+1+j].conjugate() # stay hermitian!

         for i in range(n-k-1):
             # Store the new (kth) column of "L" within the lower-
-            # left-hand corner of "A", being sure to set the lower-
-            # left entries from the upper-right ones to avoid
-            # collisions.
-            A[k+i+1,k] = A[k,k+1+i]/A[k,k]
+            # left-hand corner of "A".
+            A[k+i+1,k] /= A[k,k]

         # No return value, only the desired side effects of updating
         # p, d, and A.
@@ -460,7 +588,9 @@ def block_ldlt(A):
         if k == (n-1):
             # Handle this trivial case manually, since otherwise the
             # algorithm's references to the e.g. "subdiagonal" are
"subdiagonal" are - # meaningless. + # meaningless. The corresponding entry of "L" will be + # fixed later (since it's an on-diagonal element, it gets + # set to one eventually). d.append( matrix(ring, 1, [[A[k,k]]]) ) k += 1 continue @@ -469,10 +599,8 @@ def block_ldlt(A): # kth column. This occurs prior to Step (1) in Higham, # but is part of Step (1) in Bunch and Kaufman. We adopt # Higham's "omega" notation instead of B&K's "lambda" - # because "lambda" can lead to some confusion. Beware: - # the subdiagonals of our matrix are being overwritten! - # So we actually use the corresponding row entries instead. - column_1_subdiag = [ a_ki.abs() for a_ki in A[k,k+1:].list() ] + # because "lambda" can lead to some confusion. + column_1_subdiag = [ a_ki.abs() for a_ki in A[k+1:,k].list() ] omega_1 = max([ a_ki for a_ki in column_1_subdiag ]) if omega_1 == 0: @@ -482,7 +610,9 @@ def block_ldlt(A): # [ 0 B ] # # and we can simply skip to the next step after recording - # the 1x1 pivot "1" in the top-left position. + # the 1x1 pivot "a" in the top-left position. The entry "a" + # will be adjusted to "1" later on to ensure that "L" is + # (block) unit-lower-triangular. d.append( matrix(ring, 1, [[A[k,k]]]) ) k += 1 continue @@ -508,9 +638,8 @@ def block_ldlt(A): # B&K's Step (3) where we find the largest off-diagonal entry # (in magniture) in column "r". Since the matrix is Hermitian, # we need only look at the above-diagonal entries to find the - # off-diagonal of maximal magnitude. (Beware: the subdiagonal - # entries are being overwritten.) - omega_r = max( a_rj.abs() for a_rj in A[:r,r].list() ) + # off-diagonal of maximal magnitude. + omega_r = max( a_rj.abs() for a_rj in A[r,k:r].list() ) if A[k,k].abs()*omega_r >= alpha*(omega_1**2): # Step (2) in Higham or Step (4) in B&K. @@ -540,22 +669,22 @@ def block_ldlt(A): # We don't actually need the inverse of E, what we really need # is C*E.inverse(), and that can be found by setting # - # C*E.inverse() == X <====> XE == C. + # X = C*E.inverse() <====> XE = C. # - # The latter can be found much more easily by solving a system. - # Note: I do not actually know that sage solves the system more + # Then "X" can be found easily by solving a system. Note: I + # do not actually know that sage solves the system more # intelligently, but this is still The Right Thing To Do. CE_inverse = E.solve_left(C) - schur_complement = B - (CE_inverse*C.transpose()) + schur_complement = B - (CE_inverse*C.conjugate_transpose()) # Compute the Schur complement that we'll work on during # the following iteration, and store it back in the lower- # right-hand corner of "A". for i in range(n-k-2): for j in range(i+1): - A[k+2+j,k+2+i] = A[k+2+j,k+2+i] - schur_complement[j,i] - A[k+2+i,k+2+j] = A[k+2+j,k+2+i] # keep it symmetric! + A[k+2+i,k+2+j] = schur_complement[i,j] + A[k+2+j,k+2+i] = schur_complement[j,i] # The on- and above-diagonal entries of "L" will be fixed # later, so we only need to worry about the lower-left entry @@ -565,9 +694,7 @@ def block_ldlt(A): for i in range(n-k-2): for j in range(2): # Store the new (k and (k+1)st) columns of "L" within - # the lower-left-hand corner of "A", being sure to set - # the lower-left entries from the upper-right ones to - # avoid collisions. + # the lower-left-hand corner of "A". A[k+i+2,k+j] = CE_inverse[i,j]