return True # vacuously
return A.is_hermitian() and all( v >= 0 for v in A.eigenvalues() )
-def ldlt_naive(A):
+
+def _block_ldlt(A):
r"""
- Perform a pivoted `LDL^{T}` factorization of the Hermitian
- positive-semidefinite matrix `A`.
+ Perform a user-unfriendly block-`LDL^{T}` factorization of the
+    Hermitian matrix `A`.
+
+ This function is used internally to compute the factorization for
+ the user-friendly ``block_ldlt`` function. Whereas that function
+ returns three nice matrices, this one returns
+
+ * A list ``p`` of the first ``n`` natural numbers, permuted.
+    * A matrix whose lower-triangular portion is ``L``, but whose
+      (strict) upper-triangular portion is junk.
+    * A list of the block-diagonal entries of ``D``.
+
+    This is mainly useful to avoid having to "undo" the construction
+ of the matrix ``D`` when we don't need it. For example, it's much
+ easier to compute the inertia of a matrix from the list of blocks
+ than it is from the block-diagonal matrix itself, because given a
+ block-diagonal matrix, you first have to figure out where the
+ blocks are!
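+
+    For instance, given a Hermitian matrix ``A``, its inertia could be
+    computed from the output of this function along the following
+    lines (a sketch only, not part of this module)::
+
+        _,_,d = _block_ldlt(A)
+        # Each 2x2 block produced by a Bunch-Kaufman pivot has a
+        # negative determinant, and therefore one positive and one
+        # negative eigenvalue.
+        two  = sum( 1 for b in d if b.nrows() == 2 )
+        pos  = two + sum( 1 for b in d if b.nrows() == 1 and b[0,0] > 0 )
+        neg  = two + sum( 1 for b in d if b.nrows() == 1 and b[0,0] < 0 )
+        zero = sum( 1 for b in d if b.nrows() == 1 and b[0,0] == 0 )
+        inertia = (pos, neg, zero)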
+ """
+ ring = A.base_ring().fraction_field()
+ A = A.change_ring(ring)
+
+ # The magic constant used by Bunch-Kaufman
+ alpha = (1 + ZZ(17).sqrt()) * ~ZZ(8)
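+    # This value of alpha balances the worst-case element growth of
+    # two successive 1x1 pivot steps against that of a single 2x2
+    # pivot step, minimizing the overall growth bound.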
+
+ # Keep track of the permutations and diagonal blocks in a vector
+ # rather than in a matrix, for efficiency.
+ n = A.nrows()
+ p = list(range(n))
+ d = []
+
+ def swap_rows_columns(M, k, s):
+ r"""
+ Swap rows/columns ``k`` and ``s`` of the matrix ``M``, and update
+ the list ``p`` accordingly.
+ """
+ if s > k:
+ # s == k would swap row/column k with itself, and we don't
+ # actually want to perform the identity permutation. If
+ # you work out the recursive factorization by hand, you'll
+ # notice that the rows/columns of "L" need to be permuted
+ # as well. A nice side effect of storing "L" within "A"
+ # itself is that we can skip that step. The first column
+ # of "L" is hit by all of the transpositions in
+ # succession, and the second column is hit by all but the
+ # first transposition, and so on.
+ M.swap_columns(k,s)
+ M.swap_rows(k,s)
+
+            p[k], p[s] = p[s], p[k]
+
+ # No return value, we're only interested in the "side effects"
+        # of modifying the matrix M (by reference) and the permutation
+ # list p (which is in scope when this function is defined).
+ return
+
+
+ def pivot1x1(M, k, s):
+ r"""
+ Perform a 1x1 pivot swapping rows/columns `k` and `s >= k`.
+ Relies on the fact that matrices are passed by reference,
+ since for performance reasons this routine should overwrite
+ its argument. Updates the local variables ``p`` and ``d`` as
+ well.
+ """
+ swap_rows_columns(M,k,s)
+
+ # Now the pivot is in the (k,k)th position.
+        d.append( matrix(ring, 1, [[M[k,k]]]) )
+
+        # Compute the Schur complement that we'll work on during
+        # the following iteration, and store it back in the lower-
+        # right-hand corner of "M".
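+        # In block terms: if "a" is the pivot M[k,k] and "v" is the
+        # column vector M[k+1:,k], then this computes the rank-one
+        # update S = B - v*v^{*}/a of the submatrix B = M[k+1:,k+1:].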
+ for i in range(n-k-1):
+ for j in range(i+1):
+                M[k+1+i,k+1+j] = ( M[k+1+i,k+1+j] -
+                                   M[k+1+i,k]*M[k,k+1+j]/M[k,k] )
+                M[k+1+j,k+1+i] = M[k+1+i,k+1+j].conjugate() # stay Hermitian!
+
+ for i in range(n-k-1):
+            # Store the new (kth) column of "L" within the lower-
+            # left-hand corner of "M".
+            M[k+i+1,k] /= M[k,k]
+
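+        # For example, after a call with k == 0, "M" has the form
+        #
+        #   [ a    *  ]
+        #   [ v/a  S  ]
+        #
+        # where "a" was the 1x1 pivot, "S" is the Schur complement
+        # computed above, and "*" denotes leftover entries that are
+        # no longer meaningful.
+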
+ # No return value, only the desired side effects of updating
+ # p, d, and A.
+ return
- This is a naive, recursive implementation that is inefficient due
- to Python's lack of tail-call optimization. The pivot strategy is
- to choose the largest diagonal entry of the matrix at each step,
- and to permute it into the top-left position. Ultimately this
- results in a factorization `A = PLDL^{T}P^{T}`, where `P` is a
- permutation matrix, `L` is unit-lower-triangular, and `D` is
- diagonal decreasing from top-left to bottom-right.
+ k = 0
+ while k < n:
+        # At each step, we're considering the (n-k)-by-(n-k)
+        # submatrix contained in the lower-right corner of "A",
+        # because that's where we're storing the next iterate. So our
+        # indices are always "k" greater than those of Higham or
+        # B&K. Note that ``n == 0`` is handled by skipping this loop
+        # entirely.
- ALGORITHM:
+ if k == (n-1):
+ # Handle this trivial case manually, since otherwise the
+            # algorithm's references to, e.g., the "subdiagonal" are
+ # meaningless. The corresponding entry of "L" will be
+ # fixed later (since it's an on-diagonal element, it gets
+ # set to one eventually).
+ d.append( matrix(ring, 1, [[A[k,k]]]) )
+ k += 1
+ continue
- The algorithm is based on the discussion in Golub and Van Loan, but with
- some "typos" fixed.
+ # Find the largest subdiagonal entry (in magnitude) in the
+ # kth column. This occurs prior to Step (1) in Higham,
+ # but is part of Step (1) in Bunch and Kaufman. We adopt
+ # Higham's "omega" notation instead of B&K's "lambda"
+ # because "lambda" can lead to some confusion.
+ column_1_subdiag = [ a_ki.abs() for a_ki in A[k+1:,k].list() ]
+        omega_1 = max(column_1_subdiag)
+
+ if omega_1 == 0:
+ # In this case, our matrix looks like
+ #
+ # [ a 0 ]
+ # [ 0 B ]
+ #
+ # and we can simply skip to the next step after recording
+ # the 1x1 pivot "a" in the top-left position. The entry "a"
+ # will be adjusted to "1" later on to ensure that "L" is
+ # (block) unit-lower-triangular.
+ d.append( matrix(ring, 1, [[A[k,k]]]) )
+ k += 1
+ continue
+
+ if A[k,k].abs() > alpha*omega_1:
+ # This is the first case in Higham's Step (1), and B&K's
+ # Step (2). Note that we have skipped the part of B&K's
+ # Step (1) where we determine "r", since "r" is not yet
+ # needed and we may waste some time computing it
+ # otherwise. We are performing a 1x1 pivot, but the
+ # rows/columns are already where we want them, so nothing
+ # needs to be permuted.
+ pivot1x1(A,k,k)
+ k += 1
+ continue
+
+ # Now back to Step (1) of Higham, where we find the index "r"
+ # that corresponds to omega_1. This is the "else" branch of
+ # Higham's Step (1).
+ r = k + 1 + column_1_subdiag.index(omega_1)
+
+ # Continuing the "else" branch of Higham's Step (1), and onto
+ # B&K's Step (3) where we find the largest off-diagonal entry
+        # (in magnitude) in column "r". Since the matrix is Hermitian,
+ # we need only look at the above-diagonal entries to find the
+ # off-diagonal of maximal magnitude.
+ omega_r = max( a_rj.abs() for a_rj in A[r,k:r].list() )
+
+ if A[k,k].abs()*omega_r >= alpha*(omega_1**2):
+ # Step (2) in Higham or Step (4) in B&K.
+ pivot1x1(A,k,k)
+ k += 1
+ continue
+
+ if A[r,r].abs() > alpha*omega_r:
+ # This is Step (3) in Higham or Step (5) in B&K. Still a 1x1
+ # pivot, but this time we need to swap rows/columns k and r.
+ pivot1x1(A,k,r)
+ k += 1
+ continue
+
+ # If we've made it this far, we're at Step (4) in Higham or
+ # Step (6) in B&K, where we perform a 2x2 pivot.
+ swap_rows_columns(A,k+1,r)
+
+ # The top-left 2x2 submatrix (starting at position k,k) is now
+ # our pivot.
+ E = A[k:k+2,k:k+2]
+ d.append(E)
+
+ C = A[k+2:n,k:k+2]
+ B = A[k+2:,k+2:]
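+
+        # In block form, the working submatrix and its factorization
+        # now look like
+        #
+        #   [ E  C^{*} ]   [ I          0 ] [ E  0 ] [ I  E^{-1}C^{*} ]
+        #   [ C  B     ] = [ C*E^{-1}   I ] [ 0  S ] [ 0  I           ]
+        #
+        # where S = B - C*E^{-1}*C^{*} is the Schur complement that
+        # will become the next iterate.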
+
+ # We don't actually need the inverse of E, what we really need
+ # is C*E.inverse(), and that can be found by setting
+ #
+ # X = C*E.inverse() <====> XE = C.
+ #
+ # Then "X" can be found easily by solving a system. Note: I
+        # do not actually know that Sage solves the system more
+ # intelligently, but this is still The Right Thing To Do.
+ CE_inverse = E.solve_left(C)
+
+ schur_complement = B - (CE_inverse*C.conjugate_transpose())
+
+ # Compute the Schur complement that we'll work on during
+ # the following iteration, and store it back in the lower-
+ # right-hand corner of "A".
+ for i in range(n-k-2):
+ for j in range(i+1):
+ A[k+2+i,k+2+j] = schur_complement[i,j]
+ A[k+2+j,k+2+i] = schur_complement[j,i]
+
+ # The on- and above-diagonal entries of "L" will be fixed
+ # later, so we only need to worry about the lower-left entry
+ # of the 2x2 identity matrix that belongs at the top of the
+        # two new columns of "L".
+ A[k+1,k] = 0
+ for i in range(n-k-2):
+ for j in range(2):
+ # Store the new (k and (k+1)st) columns of "L" within
+ # the lower-left-hand corner of "A".
+ A[k+i+2,k+j] = CE_inverse[i,j]
+
+ k += 2
+
+ for i in range(n):
+        # Set the diagonal of "L" to one. We skipped this during the
+        # main loop, but it's necessary for correctness.
+ A[i,i] = 1
+
+ return (p,A,d)
+
+def block_ldlt(A):
+ r"""
+ Perform a block-`LDL^{T}` factorization of the Hermitian
+ matrix `A`.
+
+ The standard `LDL^{T}` factorization of a positive-definite matrix
+ `A` factors it as `A = LDL^{T}` where `L` is unit-lower-triangular
+ and `D` is diagonal. If one allows row/column swaps via a
+ permutation matrix `P`, then this factorization can be extended to
+ some positive-semidefinite matrices `A` via the factorization
+ `P^{T}AP = LDL^{T}` that places the zeros at the bottom of `D` to
+ avoid division by zero. These factorizations extend easily to
+ complex Hermitian matrices when one replaces the transpose by the
+ conjugate-transpose.
+
+ However, we can go one step further. If, in addition, we allow `D`
+ to potentially contain `2 \times 2` blocks on its diagonal, then
+ every real or complex Hermitian matrix `A` can be factored as `A =
+ PLDL^{*}P^{T}`. When the row/column swaps are made intelligently,
+ this process is numerically stable over inexact rings like ``RDF``.
+ Bunch and Kaufman describe such a "pivot" scheme that is suitable
+ for the solution of Hermitian systems, and that is how we choose
+ our row and column swaps.
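+
+    ALGORITHM:
+
+    We essentially follow Bunch and Kaufman's partial-pivoting
+    strategy, in the form presented by Higham; the parenthesized
+    "step" numbers in the comments of ``_block_ldlt`` refer to those
+    two presentations of the algorithm.
+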
OUTPUT:
- A triple `(P,L,D)` such that `A = PLDL^{T}P^{T}` and where,
+ If the input matrix is Hermitian, we return a triple `(P,L,D)`
+ such that `A = PLDL^{*}P^{T}` and
- * `P` is a permutaiton matrix
- * `L` is unit lower-triangular
- * `D` is a diagonal matrix whose entries are decreasing from top-left
- to bottom-right
+ * `P` is a permutation matrix,
+ * `L` is unit lower-triangular,
+ * `D` is a block-diagonal matrix whose blocks are of size
+ one or two.
+
+ If the input matrix is not Hermitian, the output from this function
+ is undefined.
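+    If in doubt, the caller can verify this with ``A.is_hermitian()``
+    first.
+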
SETUP::
- sage: from mjo.ldlt import ldlt_naive, is_positive_semidefinite_naive
+ sage: from mjo.ldlt import block_ldlt
EXAMPLES:
- All three factors should be the identity when the original matrix is::
+ This three-by-three real symmetric matrix has one positive, one
+ negative, and one zero eigenvalue -- so it is not any flavor of
+ (semi)definite, yet we can still factor it::
- sage: I = matrix.identity(QQ,4)
- sage: P,L,D = ldlt_naive(I)
- sage: P == I and L == I and D == I
+ sage: A = matrix(QQ, [[0, 1, 0],
+ ....: [1, 1, 2],
+ ....: [0, 2, 0]])
+ sage: P,L,D = block_ldlt(A)
+ sage: P
+ [0 0 1]
+ [1 0 0]
+ [0 1 0]
+ sage: L
+ [ 1 0 0]
+ [ 2 1 0]
+ [ 1 1/2 1]
+ sage: D
+ [ 1| 0| 0]
+ [--+--+--]
+ [ 0|-4| 0]
+ [--+--+--]
+ [ 0| 0| 0]
+ sage: P.transpose()*A*P == L*D*L.transpose()
True
+ This two-by-two matrix has no standard factorization, but it
+ constitutes its own block-factorization::
+
+ sage: A = matrix(QQ, [ [0,1],
+ ....: [1,0] ])
+ sage: block_ldlt(A)
+ (
+ [1 0] [1 0] [0 1]
+ [0 1], [0 1], [1 0]
+ )
+
+ The same is true of the following complex Hermitian matrix::
+
+ sage: A = matrix(QQbar, [ [ 0,I],
+ ....: [-I,0] ])
+ sage: block_ldlt(A)
+ (
+ [1 0] [1 0] [ 0 I]
+ [0 1], [0 1], [-I 0]
+ )
+
TESTS:
- Ensure that a "random" positive-semidefinite matrix is factored correctly::
+ All three factors should be the identity when the original matrix is::
sage: set_random_seed()
- sage: n = ZZ.random_element(5)
- sage: A = matrix.random(QQ, n)
- sage: A = A*A.transpose()
- sage: is_positive_semidefinite_naive(A)
+ sage: n = ZZ.random_element(6)
+ sage: I = matrix.identity(QQ,n)
+ sage: P,L,D = block_ldlt(I)
+ sage: P == I and L == I and D == I
True
- sage: P,L,D = ldlt_naive(A)
+
+ Ensure that a "random" real symmetric matrix is factored correctly::
+
+ sage: set_random_seed()
+ sage: n = ZZ.random_element(6)
+ sage: A = matrix.random(QQ, n)
+ sage: A = A + A.transpose()
+ sage: P,L,D = block_ldlt(A)
sage: A == P*L*D*L.transpose()*P.transpose()
True
+ Ensure that a "random" complex Hermitian matrix is factored correctly::
+
+ sage: set_random_seed()
+ sage: n = ZZ.random_element(6)
+        sage: F = NumberField(x^2 + 1, 'I')
+ sage: A = matrix.random(F, n)
+ sage: A = A + A.conjugate_transpose()
+ sage: P,L,D = block_ldlt(A)
+ sage: A == P*L*D*L.conjugate_transpose()*P.conjugate_transpose()
+ True
+
+ Ensure that a "random" complex positive-semidefinite matrix is
+ factored correctly and that the resulting block-diagonal matrix is
+ in fact diagonal::
+
+ sage: set_random_seed()
+ sage: n = ZZ.random_element(6)
+        sage: F = NumberField(x^2 + 1, 'I')
+ sage: A = matrix.random(F, n)
+ sage: A = A*A.conjugate_transpose()
+ sage: P,L,D = block_ldlt(A)
+ sage: A == P*L*D*L.conjugate_transpose()*P.conjugate_transpose()
+ True
+ sage: diagonal_matrix(D.diagonal()) == D
+ True
+
+ The factorization should be a no-op on diagonal matrices::
+
+ sage: set_random_seed()
+ sage: n = ZZ.random_element(6)
+ sage: A = matrix.diagonal(random_vector(QQ, n))
+ sage: I = matrix.identity(QQ,n)
+ sage: P,L,D = block_ldlt(A)
+ sage: P == I and L == I and A == D
+ True
+
"""
- n = A.nrows()
- # Use the fraction field of the given matrix so that division will work
- # when (for example) our matrix consists of integer entries.
- ring = A.base_ring().fraction_field()
+ # We have to make at least one copy of the input matrix so that we
+ # can change the base ring to its fraction field. Both "L" and the
+ # intermediate Schur complements will potentially have entries in
+ # the fraction field. However, we don't need to make *two* copies.
+ # We can't store the entries of "D" and "L" in the same matrix if
+ # "D" will contain any 2x2 blocks; but we can still store the
+ # entries of "L" in the copy of "A" that we're going to make.
+ # Contrast this with the non-block LDL^T factorization where the
+ # entries of both "L" and "D" overwrite the lower-left half of "A".
+ #
+ # This grants us an additional speedup, since we don't have to
+ # permute the rows/columns of "L" *and* "A" at each iteration.
+ p,L,d = _block_ldlt(A)
+ MS = L.matrix_space()
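+    # Build the permutation matrix "P" from the list "p": column "j"
+    # of "P" is the standard basis vector e_{p[j]}.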
+ P = MS.matrix(lambda i,j: p[j] == i)
+
+ # Warning: when n == 0, this works, but returns a matrix
+ # whose (nonexistent) entries are in ZZ rather than in
+ # the base ring of P and L.
+ D = block_diagonal_matrix(d)
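+    # (If that ever matters, something like
+    # ``D = D.change_ring(L.base_ring())`` should fix it.)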
+
+    # Overwrite the (strict) upper-triangular part of "L", since it
+    # still contains the leftover (junk) entries from _block_ldlt().
+ n = L.nrows()
+ for i in range(n):
+ for j in range(i+1,n):
+ L[i,j] = 0
- if n == 0 or n == 1:
- # We can get n == 0 if someone feeds us a trivial matrix.
- P = matrix.identity(ring, n)
- L = matrix.identity(ring, n)
- D = A
- return (P,L,D)
-
- A1 = A.change_ring(ring)
- diags = A1.diagonal()
- s = diags.index(max(diags))
- P1 = copy(A1.matrix_space().identity_matrix())
- A1 = P1.T * A1 * P1
- alpha1 = A1[0,0]
-
- # Golub and Van Loan mention in passing what to do here. This is
- # only sensible if the matrix is positive-semidefinite, because we
- # are assuming that we can set everything else to zero as soon as
- # we hit the first on-diagonal zero.
- if alpha1 == 0:
- P = A1.matrix_space().identity_matrix()
- L = P
- D = A1.matrix_space().zero()
- return (P,L,D)
-
- v1 = A1[1:n,0]
- A2 = A1[1:,1:]
-
- P2, L2, D2 = ldlt_naive(A2 - (v1*v1.transpose())/alpha1)
-
- P1 = P1*block_matrix(2,2, [[ZZ(1), ZZ(0)],
- [0*v1, P2]])
- L1 = block_matrix(2,2, [[ZZ(1), ZZ(0)],
- [P2.transpose()*v1/alpha1, L2]])
- D1 = block_matrix(2,2, [[alpha1, ZZ(0)],
- [0*v1, D2]])
-
- return (P1,L1,D1)
+ return (P,L,D)