]>
gitweb.michael.orlitzky.com - sage.d.git/blob - mjo/ldlt.py
d113df36476e49e244a23ec906cef2ba1453a266
def is_positive_semidefinite_naive(A):
    r"""
    A naive positive-semidefinite check that tests the eigenvalues for
    nonnegativity. We follow the sage convention that positive
    (semi)definite matrices must be symmetric or Hermitian.

    SETUP::

        sage: from mjo.ldlt import is_positive_semidefinite_naive

    TESTS:

    The trivial matrix is vacuously positive-semidefinite::

        sage: A = matrix(QQ, 0)
        sage: is_positive_semidefinite_naive(A)
        True

    """
    if A.nrows() == 0:
        return True # vacuously

    # Per the sage convention, non-Hermitian matrices are never
    # positive-semidefinite, regardless of their eigenvalues.
    return A.is_hermitian() and all( v >= 0 for v in A.eigenvalues() )
def ldlt_naive(A):
    r"""
    Perform a pivoted `LDL^{T}` factorization of the Hermitian
    positive-semidefinite matrix `A`.

    This is a naive, recursive implementation that is inefficient due
    to Python's lack of tail-call optimization. The pivot strategy is
    to choose the largest diagonal entry of the matrix at each step,
    and to permute it into the top-left position. Ultimately this
    results in a factorization `A = PLDL^{T}P^{T}`, where `P` is a
    permutation matrix, `L` is unit-lower-triangular, and `D` is
    diagonal decreasing from top-left to bottom-right.

    ALGORITHM:

    The algorithm is based on the discussion in Golub and Van Loan, but with
    some simplifications.

    OUTPUT:

    A triple `(P,L,D)` such that `A = PLDL^{T}P^{T}` and where,

    * `P` is a permutation matrix
    * `L` is unit lower-triangular
    * `D` is a diagonal matrix whose entries are decreasing from top-left
      to bottom-right

    SETUP::

        sage: from mjo.ldlt import ldlt_naive, is_positive_semidefinite_naive

    EXAMPLES:

    All three factors should be the identity when the original matrix is::

        sage: I = matrix.identity(QQ,4)
        sage: P,L,D = ldlt_naive(I)
        sage: P == I and L == I and D == I
        True

    TESTS:

    Ensure that a "random" positive-semidefinite matrix is factored correctly::

        sage: set_random_seed()
        sage: n = ZZ.random_element(5)
        sage: A = matrix.random(QQ, n)
        sage: A = A*A.transpose()
        sage: is_positive_semidefinite_naive(A)
        True
        sage: P,L,D = ldlt_naive(A)
        sage: A == P*L*D*L.transpose()*P.transpose()
        True

    """
    n = A.nrows()

    # Use the fraction field of the given matrix so that division will work
    # when (for example) our matrix consists of integer entries.
    ring = A.base_ring().fraction_field()

    if n == 0:
        # We can get n == 0 if someone feeds us a trivial matrix.
        # All three factors are the (empty) matrix of the right shape.
        P = matrix.identity(ring, n)
        L = matrix.identity(ring, n)
        D = matrix.identity(ring, n)
        return (P, L, D)

    A1 = A.change_ring(ring)

    # Pivot: find the largest diagonal entry and permute it into the
    # top-left position with the elementary permutation P1.
    diags = A1.diagonal()
    s = diags.index(max(diags))
    P1 = copy(A1.matrix_space().identity_matrix())
    P1.swap_rows(0, s)
    A1 = P1.transpose() * A1 * P1
    alpha1 = A1[0, 0]

    # Golub and Van Loan mention in passing what to do here. This is
    # only sensible if the matrix is positive-semidefinite, because we
    # are assuming that we can set everything else to zero as soon as
    # we hit the first on-diagonal zero.
    if alpha1 == 0:
        P = A1.matrix_space().identity_matrix()
        L = P
        D = A1.matrix_space().zero()
        return (P, L, D)

    # Split off the first row/column and recurse on the (pivoted)
    # Schur complement of alpha1 in A1.
    v1 = A1[1:n, 0]
    A2 = A1[1:, 1:]
    P2, L2, D2 = ldlt_naive(A2 - (v1*v1.transpose())/alpha1)

    # Reassemble the full factors from the 1x1 top-left block and the
    # factors of the (n-1)-by-(n-1) subproblem.
    P1 = P1*block_matrix(2,2, [[ZZ(1), ZZ(0)],
                               [ZZ(0), P2]])
    L1 = block_matrix(2,2, [[ZZ(1), ZZ(0)],
                            [P2.transpose()*v1/alpha1, L2]])
    D1 = block_matrix(2,2, [[alpha1, ZZ(0)],
                            [ZZ(0), D2]])

    return (P1, L1, D1)
def ldlt_fast(A):
    r"""
    Perform a fast, pivoted `LDL^{T}` factorization of the Hermitian
    positive-semidefinite matrix `A`.

    This function is much faster than ``ldlt_naive`` because the
    tail-recursion has been unrolled into a loop.

    OUTPUT:

    A triple `(P,L,D)` such that `A = PLDL^{T}P^{T}` and where,

    * `P` is a permutation matrix
    * `L` is unit lower-triangular
    * `D` is a diagonal matrix whose entries are decreasing from top-left
      to bottom-right

    """
    # Use the fraction field of the given matrix so that division will
    # work when (for example) our matrix consists of integer entries.
    ring = A.base_ring().fraction_field()
    A = A.change_ring(ring)
    n = A.nrows()

    # Don't try to store the results in the lower-left-hand corner of
    # "A" itself; there lies madness.
    L = copy(A.matrix_space().identity_matrix())
    D = copy(A.matrix_space().zero())

    # Keep track of the permutations in a vector rather than in a
    # matrix, for efficiency.
    p = list(range(n))

    for k in range(n):
        # We need to loop once for every diagonal entry in the
        # matrix. So, as many times as it has rows/columns. At each
        # step, we obtain the permutation needed to put things in the
        # right place, then the "next" entry (alpha) of D, and finally
        # another column of L.
        diags = A.diagonal()[k:n]
        alpha = max(diags)

        # We're working *within* the matrix ``A``, so every index is
        # offset by k. For example: after the second step, we should
        # only be looking at the lower 3-by-3 block of a 5-by-5 matrix.
        s = k + diags.index(alpha)

        # Move the largest diagonal element up into the top-left corner
        # of the block we're working on (the one starting from index k,k).
        # Presumably this is faster than hitting the thing with a
        # permutation matrix.
        A.swap_columns(k, s)
        A.swap_rows(k, s)

        # Have to do L, too, to keep track of the "P2.T" (which is 1 x
        # P3.T which is 1 x P4 T)... in the recursive
        # algorithm. There, we compute P2^T from the bottom up. Here,
        # we apply the permutations one at a time, essentially
        # building them top-down (but just applying them instead of
        # building the matrices).
        L.swap_columns(k, s)
        L.swap_rows(k, s)

        # Update the permutation "matrix" with the next swap.
        p[k], p[s] = p[s], p[k]

        # Now the largest diagonal is in the top-left corner of
        # the block below and to the right of index k,k....
        # Note: same as ``pivot``.
        D[k, k] = alpha

        # When alpha is zero, we can just leave the rest of the D/L entries
        # zero... which is exactly how they start out.
        if alpha != 0:
            # Update the "next" block of A that we'll work on during
            # the following iteration. I think it's faster to get the
            # entries of a row than a column here?
            v = vector(ring, A[k, k+1:n].list())
            b = v.column()*v.row()/alpha
            for i in range(n-k-1):
                for j in range(i+1):
                    # Something goes wrong if I try to access the kth row/column
                    # of A to save the intermediate "b" here...
                    A[k+1+i, k+1+j] = A[k+1+i, k+1+j] - b[i, j]
                    A[k+1+j, k+1+i] = A[k+1+i, k+1+j] # keep it symmetric!

            # Store the "new" (kth) column of L.
            for i in range(n-k-1):
                L[k+i+1, k] = v[i]/alpha

    # Finally, turn the permutation vector back into the permutation
    # matrix P: its columns are the permuted rows of the identity.
    I = A.matrix_space().identity_matrix()
    P = matrix.column( I.row(p[j]) for j in range(n) )

    return (P, L, D)