]>
gitweb.michael.orlitzky.com - sage.d.git/blob - mjo/ldlt.py
3 def is_positive_semidefinite_naive(A
):
5 A naive positive-semidefinite check that tests the eigenvalues for
6 nonnegativity. We follow the sage convention that positive
7 (semi)definite matrices must be symmetric or Hermitian.
11 sage: from mjo.ldlt import is_positive_semidefinite_naive
15 The trivial matrix is vacuously positive-semidefinite::
17 sage: A = matrix(QQ, 0)
20 sage: is_positive_semidefinite_naive(A)
25 return True # vacuously
26 return A
.is_hermitian() and all( v
>= 0 for v
in A
.eigenvalues() )
31 Perform a pivoted `LDL^{T}` factorization of the Hermitian
32 positive-semidefinite matrix `A`.
34 This is a naive, recursive implementation that is inefficient due
35 to Python's lack of tail-call optimization. The pivot strategy is
36 to choose the largest diagonal entry of the matrix at each step,
37 and to permute it into the top-left position. Ultimately this
38 results in a factorization `A = PLDL^{T}P^{T}`, where `P` is a
39 permutation matrix, `L` is unit-lower-triangular, and `D` is
40 diagonal decreasing from top-left to bottom-right.
44 The algorithm is based on the discussion in Golub and Van Loan, but with
49 A triple `(P,L,D)` such that `A = PLDL^{T}P^{T}` and where,
51 * `P` is a permutation matrix
52 * `L` is unit lower-triangular
53 * `D` is a diagonal matrix whose entries are decreasing from top-left
58 sage: from mjo.ldlt import ldlt_naive, is_positive_semidefinite_naive
62 All three factors should be the identity when the original matrix is::
64 sage: I = matrix.identity(QQ,4)
65 sage: P,L,D = ldlt_naive(I)
66 sage: P == I and L == I and D == I
71 Ensure that a "random" positive-semidefinite matrix is factored correctly::
73 sage: set_random_seed()
74 sage: n = ZZ.random_element(5)
75 sage: A = matrix.random(QQ, n)
76 sage: A = A*A.transpose()
77 sage: is_positive_semidefinite_naive(A)
79 sage: P,L,D = ldlt_naive(A)
80 sage: A == P*L*D*L.transpose()*P.transpose()
86 # Use the fraction field of the given matrix so that division will work
87 # when (for example) our matrix consists of integer entries.
88 ring
= A
.base_ring().fraction_field()
91 # We can get n == 0 if someone feeds us a trivial matrix.
92 P
= matrix
.identity(ring
, n
)
93 L
= matrix
.identity(ring
, n
)
97 A1
= A
.change_ring(ring
)
99 s
= diags
.index(max(diags
))
100 P1
= copy(A1
.matrix_space().identity_matrix())
105 # Golub and Van Loan mention in passing what to do here. This is
106 # only sensible if the matrix is positive-semidefinite, because we
107 # are assuming that we can set everything else to zero as soon as
108 # we hit the first on-diagonal zero.
110 P
= A1
.matrix_space().identity_matrix()
112 D
= A1
.matrix_space().zero()
118 P2
, L2
, D2
= ldlt_naive(A2
- (v1
*v1
.transpose())/alpha1
)
120 P1
= P1
*block_matrix(2,2, [[ZZ(1), ZZ(0)],
122 L1
= block_matrix(2,2, [[ZZ(1), ZZ(0)],
123 [P2
.transpose()*v1
/alpha1
, L2
]])
124 D1
= block_matrix(2,2, [[alpha1
, ZZ(0)],
133 Perform a fast, pivoted `LDL^{T}` factorization of the Hermitian
134 positive-semidefinite matrix `A`.
136 This function is much faster than ``ldlt_naive`` because the
137 tail-recursion has been unrolled into a loop.
139 ring
= A
.base_ring().fraction_field()
140 A
= A
.change_ring(ring
)
142 # Keep track of the permutations in a vector rather than in a
143 # matrix, for efficiency.
148 # We need to loop once for every diagonal entry in the
149 # matrix. So, as many times as it has rows/columns. At each
150 # step, we obtain the permutation needed to put things in the
151 # right place, then the "next" entry (alpha) of D, and finally
152 # another column of L.
153 diags
= A
.diagonal()[k
:n
]
156 # We're working *within* the matrix ``A``, so every index is
157 # offset by k. For example: after the second step, we should
158 # only be looking at the lower 3-by-3 block of a 5-by-5 matrix.
159 s
= k
+ diags
.index(alpha
)
161 # Move the largest diagonal element up into the top-left corner
162 # of the block we're working on (the one starting from index k,k).
163 # Presumably this is faster than hitting the thing with a
164 # permutation matrix.
166 # Since "L" is stored in the lower-left "half" of "A", it's a
167 # good thing that we need to permute "L," too. This is due to
168 # how P2.T appears in the recursive algorithm applied to the
169 # "current" column of L. There, P2.T is computed recursively, as
170 # 1 x P3.T, and P3.T = 1 x P4.T, etc, from the bottom up. All
171 # are eventually applied to "v" in order. Here we're working
172 # from the top down, and rather than keep track of what
173 # permutations we need to perform, we just perform them as we
174 # go along. No recursion needed.
178 # Update the permutation "matrix" with the swap we just did.
183 # Now the largest diagonal is in the top-left corner of the
184 # block below and to the right of index k,k. When alpha is
185 # zero, we can just leave the rest of the D/L entries
186 # zero... which is exactly how they start out.
188 # Update the "next" block of A that we'll work on during
189 # the following iteration. I think it's faster to get the
190 # entries of a row than a column here?
191 for i
in range(n
-k
-1):
193 A
[k
+1+j
,k
+1+i
] = A
[k
+1+j
,k
+1+i
] - A
[k
,k
+1+j
]*A
[k
,k
+1+i
]/alpha
194 A
[k
+1+i
,k
+1+j
] = A
[k
+1+j
,k
+1+i
] # keep it symmetric!
196 for i
in range(n
-k
-1):
197 # Store the "new" (kth) column of L, being sure to set
198 # the lower-left "half" from the upper-right "half"
199 A
[k
+i
+1,k
] = A
[k
,k
+1+i
]/alpha
201 MS
= A
.matrix_space()
202 P
= MS
.matrix(lambda i
,j
: p
[j
] == i
)
203 D
= MS
.diagonal_matrix(A
.diagonal())
207 for j
in range(i
+1,n
):