+ # Find the largest subdiagonal entry (in magnitude) in the
+ # kth column. This occurs prior to Step (1) in Higham,
+ # but is part of Step (1) in Bunch and Kaufman. We adopt
+ # Higham's "omega" notation instead of B&K's "lambda"
+ # because "lambda" can lead to some confusion.
+ column_1_subdiag = [ a_ki.abs() for a_ki in A[k+1:,k].list() ]
+ omega_1 = max([ a_ki for a_ki in column_1_subdiag ])
+
+ if omega_1 == 0:
+ # In this case, our matrix looks like
+ #
+ # [ a 0 ]
+ # [ 0 B ]
+ #
+ # and we can simply skip to the next step after recording
+ # the 1x1 pivot "a" in the top-left position. The entry "a"
+ # will be adjusted to "1" later on to ensure that "L" is
+ # (block) unit-lower-triangular.
+ d.append( matrix(ring, 1, [[A[k,k]]]) )
+ k += 1
+ continue
+
+ if A[k,k].abs() > alpha*omega_1:
+ # This is the first case in Higham's Step (1), and B&K's
+ # Step (2). Note that we have skipped the part of B&K's
+ # Step (1) where we determine "r", since "r" is not yet
+ # needed and we may waste some time computing it
+ # otherwise. We are performing a 1x1 pivot, but the
+ # rows/columns are already where we want them, so nothing
+ # needs to be permuted.
+ pivot1x1(A,k,k)
+ k += 1
+ continue
+
+ # Now back to Step (1) of Higham, where we find the index "r"
+ # that corresponds to omega_1. This is the "else" branch of
+ # Higham's Step (1).
+ r = k + 1 + column_1_subdiag.index(omega_1)
+
+ # Continuing the "else" branch of Higham's Step (1), and onto
+ # B&K's Step (3) where we find the largest off-diagonal entry
+ # (in magnitude) in column "r". Since the matrix is Hermitian,
+ # we need only look at the above-diagonal entries to find the
+ # off-diagonal of maximal magnitude.
+ omega_r = max( a_rj.abs() for a_rj in A[r,k:r].list() )
+
+ if A[k,k].abs()*omega_r >= alpha*(omega_1**2):
+ # Step (2) in Higham or Step (4) in B&K.
+ pivot1x1(A,k,k)
+ k += 1
+ continue
+
+ if A[r,r].abs() > alpha*omega_r:
+ # This is Step (3) in Higham or Step (5) in B&K. Still a 1x1
+ # pivot, but this time we need to swap rows/columns k and r.
+ pivot1x1(A,k,r)
+ k += 1
+ continue
+
+ # If we've made it this far, we're at Step (4) in Higham or
+ # Step (6) in B&K, where we perform a 2x2 pivot.
+ swap_rows_columns(A,k+1,r)
+
+ # The top-left 2x2 submatrix (starting at position k,k) is now
+ # our pivot.
+ E = A[k:k+2,k:k+2]
+ d.append(E)
+
+ C = A[k+2:n,k:k+2]
+ B = A[k+2:,k+2:]
+
+ # We don't actually need the inverse of E, what we really need
+ # is C*E.inverse(), and that can be found by setting
+ #
+ # X = C*E.inverse() <====> XE = C.
+ #
+ # Then "X" can be found easily by solving a system. Note: I
+ # do not actually know that sage solves the system more
+ # intelligently, but this is still The Right Thing To Do.
+ CE_inverse = E.solve_left(C)
+
+ schur_complement = B - (CE_inverse*C.conjugate_transpose())
+
+ # Store the Schur complement (which we'll work on during the
+ # following iteration) back into the lower-right-hand corner
+ # of "A".
+ for i in range(n-k-2):
+ for j in range(i+1):
+ A[k+2+i,k+2+j] = schur_complement[i,j]
+ A[k+2+j,k+2+i] = schur_complement[j,i]
+
+ # The on- and above-diagonal entries of "L" will be fixed
+ # later, so we only need to worry about the lower-left entry
+ # of the 2x2 identity matrix that belongs at the top of the
+ # new column of "L".
+ A[k+1,k] = 0
+ for i in range(n-k-2):
+ for j in range(2):
+ # Store the new (k and (k+1)st) columns of "L" within
+ # the lower-left-hand corner of "A".
+ A[k+i+2,k+j] = CE_inverse[i,j]
+
+
+ k += 2
+
+ for i in range(n):
+ # We skipped this during the main loop, but it's necessary for
+ # correctness.
+ A[i,i] = 1
+
+ return (p,A,d)
+
+def block_ldlt(A):
+ r"""
+ Perform a block-`LDL^{T}` factorization of the Hermitian
+ matrix `A`.
+
+ The standard `LDL^{T}` factorization of a positive-definite matrix
+ `A` factors it as `A = LDL^{T}` where `L` is unit-lower-triangular
+ and `D` is diagonal. If one allows row/column swaps via a
+ permutation matrix `P`, then this factorization can be extended to
+ some positive-semidefinite matrices `A` via the factorization
+ `P^{T}AP = LDL^{T}` that places the zeros at the bottom of `D` to
+ avoid division by zero. These factorizations extend easily to
+ complex Hermitian matrices when one replaces the transpose by the
+ conjugate-transpose.
+
+ However, we can go one step further. If, in addition, we allow `D`
+ to potentially contain `2 \times 2` blocks on its diagonal, then
+ every real or complex Hermitian matrix `A` can be factored as `A =
+ PLDL^{*}P^{T}`. When the row/column swaps are made intelligently,
+ this process is numerically stable over inexact rings like ``RDF``.
+ Bunch and Kaufman describe such a "pivot" scheme that is suitable
+ for the solution of Hermitian systems, and that is how we choose
+ our row and column swaps.