# mjo/ldlt.py: rule #1 is never compute the inverse of a matrix.
1 from sage.all import *
2
def is_positive_semidefinite_naive(A):
    r"""
    A naive positive-semidefinite check that tests the eigenvalues for
    nonnegativity. We follow the sage convention that positive
    (semi)definite matrices must be symmetric or Hermitian.

    SETUP::

        sage: from mjo.ldlt import is_positive_semidefinite_naive

    TESTS:

    The trivial matrix is vacuously positive-semidefinite::

        sage: A = matrix(QQ, 0)
        sage: A
        []
        sage: is_positive_semidefinite_naive(A)
        True

    """
    if A.nrows() == 0:
        return True # vacuously
    return A.is_hermitian() and all( v >= 0 for v in A.eigenvalues() )
27
28
def ldlt_naive(A):
    r"""
    Perform a pivoted `LDL^{T}` factorization of the Hermitian
    positive-semidefinite matrix `A`.

    This is a naive, recursive implementation that is inefficient due
    to Python's lack of tail-call optimization. The pivot strategy is
    to choose the largest diagonal entry of the matrix at each step,
    and to permute it into the top-left position. Ultimately this
    results in a factorization `A = PLDL^{T}P^{T}`, where `P` is a
    permutation matrix, `L` is unit-lower-triangular, and `D` is
    diagonal decreasing from top-left to bottom-right.

    ALGORITHM:

    The algorithm is based on the discussion in Golub and Van Loan, but with
    some "typos" fixed.

    OUTPUT:

    A triple `(P,L,D)` such that `A = PLDL^{T}P^{T}` and where,

    * `P` is a permutation matrix
    * `L` is unit lower-triangular
    * `D` is a diagonal matrix whose entries are decreasing from top-left
      to bottom-right

    SETUP::

        sage: from mjo.ldlt import ldlt_naive, is_positive_semidefinite_naive

    EXAMPLES:

    All three factors should be the identity when the original matrix is::

        sage: I = matrix.identity(QQ,4)
        sage: P,L,D = ldlt_naive(I)
        sage: P == I and L == I and D == I
        True

    TESTS:

    Ensure that a "random" positive-semidefinite matrix is factored correctly::

        sage: set_random_seed()
        sage: n = ZZ.random_element(5)
        sage: A = matrix.random(QQ, n)
        sage: A = A*A.transpose()
        sage: is_positive_semidefinite_naive(A)
        True
        sage: P,L,D = ldlt_naive(A)
        sage: A == P*L*D*L.transpose()*P.transpose()
        True

    """
    n = A.nrows()

    # Use the fraction field of the given matrix so that division will work
    # when (for example) our matrix consists of integer entries.
    ring = A.base_ring().fraction_field()

    if n == 0 or n == 1:
        # We can get n == 0 if someone feeds us a trivial matrix.
        # Both base cases are already factored: P = L = identity.
        P = matrix.identity(ring, n)
        L = matrix.identity(ring, n)
        D = A
        return (P,L,D)

    # Permute the largest diagonal entry into the top-left position.
    A1 = A.change_ring(ring)
    diags = A1.diagonal()
    s = diags.index(max(diags))
    P1 = copy(A1.matrix_space().identity_matrix())
    P1.swap_rows(0,s)
    A1 = P1.T * A1 * P1
    alpha1 = A1[0,0]

    # Golub and Van Loan mention in passing what to do here. This is
    # only sensible if the matrix is positive-semidefinite, because we
    # are assuming that we can set everything else to zero as soon as
    # we hit the first on-diagonal zero.
    if alpha1 == 0:
        P = A1.matrix_space().identity_matrix()
        L = P
        D = A1.matrix_space().zero()
        return (P,L,D)

    # Split off the first row/column and recurse on the Schur
    # complement of the (0,0) pivot.
    v1 = A1[1:n,0]
    A2 = A1[1:,1:]

    P2, L2, D2 = ldlt_naive(A2 - (v1*v1.transpose())/alpha1)

    # Reassemble the factors from the 2x2 block decomposition of the
    # recursive factorization A1 = [alpha1, v1.T; v1, A2].
    P1 = P1*block_matrix(2,2, [[ZZ(1), ZZ(0)],
                               [0*v1, P2]])
    L1 = block_matrix(2,2, [[ZZ(1), ZZ(0)],
                            [P2.transpose()*v1/alpha1, L2]])
    D1 = block_matrix(2,2, [[alpha1, ZZ(0)],
                            [0*v1, D2]])

    return (P1,L1,D1)
128
129
130
def ldlt_fast(A):
    r"""
    Perform a fast, pivoted `LDL^{T}` factorization of the Hermitian
    positive-semidefinite matrix `A`.

    This function is much faster than ``ldlt_naive`` because the
    tail-recursion has been unrolled into a loop, and because both the
    intermediate Schur complements and the columns of `L` are stored
    within (a copy of) `A` itself rather than in new matrices.

    OUTPUT:

    A triple `(P,L,D)` such that `A = PLDL^{T}P^{T}` and where,

    * `P` is a permutation matrix
    * `L` is unit lower-triangular
    * `D` is a diagonal matrix whose entries are decreasing from
      top-left to bottom-right
    """
    # Use the fraction field of the given matrix so that division will
    # work when (for example) our matrix consists of integer entries.
    ring = A.base_ring().fraction_field()
    A = A.change_ring(ring)

    # Keep track of the permutations in a vector rather than in a
    # matrix, for efficiency.
    n = A.nrows()
    p = list(range(n))

    for k in range(n):
        # We need to loop once for every diagonal entry in the
        # matrix. So, as many times as it has rows/columns. At each
        # step, we obtain the permutation needed to put things in the
        # right place, then the "next" entry (alpha) of D, and finally
        # another column of L.
        diags = A.diagonal()[k:n]
        alpha = max(diags)

        # We're working *within* the matrix ``A``, so every index is
        # offset by k. For example: after the second step, we should
        # only be looking at the lower 3-by-3 block of a 5-by-5 matrix.
        s = k + diags.index(alpha)

        # Move the largest diagonal element up into the top-left corner
        # of the block we're working on (the one starting from index k,k).
        # Presumably this is faster than hitting the thing with a
        # permutation matrix.
        #
        # Since "L" is stored in the lower-left "half" of "A", it's a
        # good thing that we need to permute "L," too. This is due to
        # how P2.T appears in the recursive algorithm applied to the
        # "current" column of L. There, P2.T is computed recursively, as
        # 1 x P3.T, and P3.T = 1 x P4.T, etc, from the bottom up. All
        # are eventually applied to "v" in order. Here we're working
        # from the top down, and rather than keep track of what
        # permutations we need to perform, we just perform them as we
        # go along. No recursion needed.
        A.swap_columns(k,s)
        A.swap_rows(k,s)

        # Update the permutation "matrix" with the swap we just did.
        p[k], p[s] = p[s], p[k]

        # Now the largest diagonal is in the top-left corner of the
        # block below and to the right of index k,k. When alpha is
        # zero, we can just leave the rest of the D/L entries
        # zero... which is exactly how they start out.
        if alpha != 0:
            # Update the "next" block of A that we'll work on during
            # the following iteration. I think it's faster to get the
            # entries of a row than a column here?
            for i in range(n-k-1):
                for j in range(i+1):
                    A[k+1+j,k+1+i] = A[k+1+j,k+1+i] - A[k,k+1+j]*A[k,k+1+i]/alpha
                    A[k+1+i,k+1+j] = A[k+1+j,k+1+i] # keep it symmetric!

            for i in range(n-k-1):
                # Store the "new" (kth) column of L, being sure to set
                # the lower-left "half" from the upper-right "half".
                A[k+i+1,k] = A[k,k+1+i]/alpha

    # Build P from the permutation list, and D from the diagonal that
    # now lives in "A". This must happen *before* we overwrite the
    # diagonal of "A" below.
    MS = A.matrix_space()
    P = MS.matrix(lambda i,j: p[j] == i)
    D = MS.diagonal_matrix(A.diagonal())

    # Overwrite the diagonal and upper-right half of "A", since we're
    # about to return it as the unit-lower-triangular "L".
    for i in range(n):
        A[i,i] = 1
        for j in range(i+1,n):
            A[i,j] = 0

    return P,A,D
211
212
def block_ldlt_naive(A, check_hermitian=False):
    r"""
    Perform a block-`LDL^{T}` factorization of the Hermitian
    matrix `A`.

    This is a naive, recursive implementation akin to
    ``ldlt_naive()``, where the pivots (and resulting diagonals) are
    either `1 \times 1` or `2 \times 2` blocks. The pivots are chosen
    using the Bunch-Kaufman scheme that is both fast and numerically
    stable.

    INPUT:

    - ``A`` -- the Hermitian matrix to factor
    - ``check_hermitian`` -- currently ignored; reserved for an
      explicit Hermitian-ness check

    OUTPUT:

    A triple `(P,L,D)` such that `A = PLDL^{T}P^{T}` and where,

    * `P` is a permutation matrix
    * `L` is unit lower-triangular
    * `D` is a block-diagonal matrix whose blocks are of size
      one or two.

    """
    n = A.nrows()

    # Use the fraction field of the given matrix so that division will work
    # when (for example) our matrix consists of integer entries.
    ring = A.base_ring().fraction_field()

    if n == 0 or n == 1:
        # We can get n == 0 if someone feeds us a trivial matrix;
        # either way there is nothing to factor.
        P = matrix.identity(ring, n)
        L = matrix.identity(ring, n)
        D = A
        return (P,L,D)

    # The magic constant (1 + sqrt(17))/8 used by Bunch-Kaufman.
    alpha = (1 + ZZ(17).sqrt()) * ~ZZ(8)
    A1 = A.change_ring(ring)

    # Bunch-Kaufman step 1, Higham step "zero." We use Higham's
    # "omega" notation instead of Bunch-Kaufman's "lambda" because
    # lambda means other things in the same context.
    column_1_subdiag = [ a_i1.abs() for a_i1 in A1[1:,0].list() ]
    omega_1 = max(column_1_subdiag)

    if omega_1 == 0:
        # "There's nothing to do at this step of the algorithm,"
        # which means that our matrix looks like,
        #
        #   [ a 0 ]
        #   [ 0 B ]
        #
        # We could still do a pivot_one_by_one() here, but it would
        # pointlessly subtract a bunch of zeros and multiply by one.
        B = A1[1:,1:]
        one = matrix(ring, 1, 1, [1])
        P2, L2, D2 = block_ldlt_naive(B)
        P1 = block_diagonal_matrix(one, P2)
        L1 = block_diagonal_matrix(one, L2)
        # BUG FIX: the top-left block of "D" is the diagonal entry
        # A1[0,0] itself, not 1. (P and L do get a 1 there.) Using 1
        # would give P*L*D*L.T*P.T != A whenever A1[0,0] != 1.
        D1 = block_diagonal_matrix(A1[:1,:1], D2)
        return (P1,L1,D1)

    def pivot_one_by_one(M, c=None):
        # Perform a one-by-one pivot on "M," swapping rows/columns "c".
        # If "c" is None, no swap is performed.
        if c is not None:
            P1 = copy(M.matrix_space().identity_matrix())
            P1.swap_rows(0,c)
            M = P1.T * M * P1

        # The top-left entry is now our 1x1 pivot.
        C = M[1:n,0]
        B = M[1:,1:]

        # Recurse on the Schur complement of the pivot.
        P2, L2, D2 = block_ldlt_naive(B - (C*C.transpose())/M[0,0])

        if c is None:
            P1 = block_matrix(2,2, [[ZZ(1), ZZ(0)],
                                    [0*C, P2]])
        else:
            P1 = P1*block_matrix(2,2, [[ZZ(1), ZZ(0)],
                                       [0*C, P2]])

        L1 = block_matrix(2,2, [[ZZ(1), ZZ(0)],
                                [P2.transpose()*C/M[0,0], L2]])
        D1 = block_matrix(2,2, [[M[0,0], ZZ(0)],
                                [0*C, D2]])

        return (P1,L1,D1)


    # Higham step (1) / Bunch-Kaufman step (2): a 1x1 pivot with no
    # swap when the (0,0) entry already dominates.
    if A1[0,0].abs() > alpha*omega_1:
        return pivot_one_by_one(A1)

    # "r" is the row index of the subdiagonal entry realizing omega_1.
    r = 1 + column_1_subdiag.index(omega_1)

    # Find the off-diagonal entry of maximal magnitude in column "r".
    # NOTE(review): only the above-diagonal entries A1[0:r,r] are
    # scanned here; by Hermitian-ness the below-diagonal entries of
    # column r correspond to row entries A1[r,r+1:], which are *not*
    # covered by this slice. Confirm against the Bunch-Kaufman paper /
    # the merged Sage implementation whether they should be included.
    omega_r = max( a_rj.abs() for a_rj in A1[:r,r].list() )

    # Higham step (2): still a 1x1 pivot on the (0,0) entry.
    if A1[0,0].abs()*omega_r >= alpha*(omega_1**2):
        return pivot_one_by_one(A1)

    if A1[r,r].abs() > alpha*omega_r:
        # Higham step (3)
        # Another 1x1 pivot, but this time swapping indices 0,r.
        return pivot_one_by_one(A1,r)

    # Higham step (4)
    # If we made it here, we have to do a 2x2 pivot.
    P1 = copy(A1.matrix_space().identity_matrix())
    P1.swap_rows(1,r)
    A1 = P1.T * A1 * P1

    # The top-left 2x2 submatrix is now our pivot.
    E = A1[:2,:2]
    C = A1[2:n,0:2]
    B = A1[2:,2:]

    if B.nrows() == 0:
        # We have a two-by-two matrix that we can do nothing
        # useful with.
        P = matrix.identity(ring, n)
        L = matrix.identity(ring, n)
        D = A1
        return (P,L,D)

    # Recurse on the Schur complement of the 2x2 pivot "E".
    P2, L2, D2 = block_ldlt_naive(B - (C*E.inverse()*C.transpose()))

    P1 = P1*block_matrix(2,2, [[ZZ(1), ZZ(0)],
                               [0*C, P2]])

    L1 = block_matrix(2,2, [[ZZ(1), ZZ(0)],
                            [P2.transpose()*C*E.inverse(), L2]])
    D1 = block_diagonal_matrix(E,D2)

    return (P1,L1,D1)
349
350
def block_ldlt(A):
    r"""
    Perform a block-`LDL^{T}` factorization of the Hermitian
    matrix `A`.

    OUTPUT:

    A triple `(P,L,D)` such that `A = PLDL^{T}P^{T}` and where,

    * `P` is a permutation matrix
    * `L` is unit lower-triangular
    * `D` is a block-diagonal matrix whose blocks are of size
      one or two.
    """

    # We have to make at least one copy of the input matrix so that we
    # can change the base ring to its fraction field. Both "L" and the
    # intermediate Schur complements will potentially have entries in
    # the fraction field. However, we don't need to make *two* copies.
    # We can't store the entries of "D" and "L" in the same matrix if
    # "D" will contain any 2x2 blocks; but we can still store the
    # entries of "L" in the copy of "A" that we're going to make.
    # Contrast this with the non-block LDL^T factorization where the
    # entries of both "L" and "D" overwrite the lower-left half of "A".
    #
    # This grants us an additional speedup, since we don't have to
    # permute the rows/columns of "L" *and* "A" at each iteration.
    ring = A.base_ring().fraction_field()
    A = A.change_ring(ring)
    MS = A.matrix_space()

    # The magic constant (1 + sqrt(17))/8 used by Bunch-Kaufman.
    alpha = (1 + ZZ(17).sqrt()) * ~ZZ(8)

    # Keep track of the permutations and diagonal blocks in a vector
    # rather than in a matrix, for efficiency.
    n = A.nrows()
    p = list(range(n))
    d = []

    def swap_rows_columns(M, k, s):
        r"""
        Swap rows/columns ``k`` and ``s`` of the matrix ``M``, and update
        the list ``p`` accordingly.
        """
        if s > k:
            # s == k would swap row/column k with itself, and we don't
            # actually want to perform the identity permutation. If
            # you work out the recursive factorization by hand, you'll
            # notice that the rows/columns of "L" need to be permuted
            # as well. A nice side effect of storing "L" within "A"
            # itself is that we can skip that step. The first column
            # of "L" is hit by all of the transpositions in
            # succession, and the second column is hit by all but the
            # first transposition, and so on.
            M.swap_columns(k,s)
            M.swap_rows(k,s)

            p[k], p[s] = p[s], p[k]

        # No return value, we're only interested in the "side effects"
        # of modifying the matrix M (by reference) and the permutation
        # list p (which is in scope when this function is defined).
        return


    def pivot1x1(M, k, s):
        r"""
        Perform a 1x1 pivot swapping rows/columns `k` and `s >= k`.
        Relies on the fact that matrices are passed by reference,
        since for performance reasons this routine should overwrite
        its argument. Updates the local variables ``p`` and ``d`` as
        well. (In practice ``M`` is always the enclosing ``A``.)
        """
        swap_rows_columns(M,k,s)

        # Now the pivot is in the (k,k)th position.
        d.append( matrix(ring, 1, [[M[k,k]]]) )

        # Compute the Schur complement that we'll work on during
        # the following iteration, and store it back in the lower-
        # right-hand corner of "M".
        for i in range(n-k-1):
            for j in range(i+1):
                M[k+1+j,k+1+i] = ( M[k+1+j,k+1+i] -
                                   M[k,k+1+j]*M[k,k+1+i]/M[k,k] )
                M[k+1+i,k+1+j] = M[k+1+j,k+1+i] # keep it symmetric!

        for i in range(n-k-1):
            # Store the new (kth) column of "L" within the lower-
            # left-hand corner of "M", being sure to set the lower-
            # left entries from the upper-right ones to avoid
            # collisions.
            M[k+i+1,k] = M[k,k+1+i]/M[k,k]

        # No return value, only the desired side effects of updating
        # p, d, and M.
        return

    k = 0
    while k < n:
        # At each step, we're considering the (n-k)-by-(n-k) submatrix
        # contained in the lower-right half of "A", because that's
        # where we're storing the next iterate. So our indices are
        # always "k" greater than those of Higham or B&K. Note that
        # ``n == 0`` is handled by skipping this loop entirely.

        if k == (n-1):
            # Handle this trivial case manually, since otherwise the
            # algorithm's references to the e.g. "subdiagonal" are
            # meaningless.
            d.append( matrix(ring, 1, [[A[k,k]]]) )
            k += 1
            continue

        # Find the largest subdiagonal entry (in magnitude) in the
        # kth column. This occurs prior to Step (1) in Higham,
        # but is part of Step (1) in Bunch and Kaufman. We adopt
        # Higham's "omega" notation instead of B&K's "lambda"
        # because "lambda" can lead to some confusion. Beware:
        # the subdiagonals of our matrix are being overwritten!
        # So we actually use the corresponding row entries instead.
        column_1_subdiag = [ a_ki.abs() for a_ki in A[k,k+1:].list() ]
        omega_1 = max(column_1_subdiag)

        if omega_1 == 0:
            # In this case, our matrix looks like
            #
            #   [ a 0 ]
            #   [ 0 B ]
            #
            # and we can simply skip to the next step after recording
            # the 1x1 pivot "a" in the top-left position.
            d.append( matrix(ring, 1, [[A[k,k]]]) )
            k += 1
            continue

        if A[k,k].abs() > alpha*omega_1:
            # This is the first case in Higham's Step (1), and B&K's
            # Step (2). Note that we have skipped the part of B&K's
            # Step (1) where we determine "r", since "r" is not yet
            # needed and we may waste some time computing it
            # otherwise. We are performing a 1x1 pivot, but the
            # rows/columns are already where we want them, so nothing
            # needs to be permuted.
            pivot1x1(A,k,k)
            k += 1
            continue

        # Now back to Step (1) of Higham, where we find the index "r"
        # that corresponds to omega_1. This is the "else" branch of
        # Higham's Step (1).
        r = k + 1 + column_1_subdiag.index(omega_1)

        # Continuing the "else" branch of Higham's Step (1), and onto
        # B&K's Step (3) where we find the largest off-diagonal entry
        # (in magnitude) in column "r". Since the matrix is Hermitian,
        # we need only look at the above-diagonal entries to find the
        # off-diagonal of maximal magnitude. (Beware: the subdiagonal
        # entries are being overwritten.)
        #
        # BUG FIX: only rows k..r-1 belong to the active block; the
        # original slice A[:r,r] also dragged in rows 0..k-1, whose
        # upper-triangular entries are stale leftovers from earlier
        # pivot steps. The naive version's relative indexing
        # corresponds to A[k:r,r].
        #
        # NOTE(review): as in block_ldlt_naive, the below-diagonal
        # entries of column r within the active block (rows r+1..n-1,
        # mirrored in row entries A[r,r+1:]) are not scanned here —
        # confirm against B&K whether they should be.
        omega_r = max( a_rj.abs() for a_rj in A[k:r,r].list() )

        if A[k,k].abs()*omega_r >= alpha*(omega_1**2):
            # Step (2) in Higham or Step (4) in B&K.
            pivot1x1(A,k,k)
            k += 1
            continue

        if A[r,r].abs() > alpha*omega_r:
            # This is Step (3) in Higham or Step (5) in B&K. Still a 1x1
            # pivot, but this time we need to swap rows/columns k and r.
            pivot1x1(A,k,r)
            k += 1
            continue

        # If we've made it this far, we're at Step (4) in Higham or
        # Step (6) in B&K, where we perform a 2x2 pivot.
        swap_rows_columns(A,k+1,r)

        # The top-left 2x2 submatrix (starting at position k,k) is now
        # our pivot.
        E = A[k:k+2,k:k+2]
        d.append(E)

        C = A[k+2:n,k:k+2]
        B = A[k+2:,k+2:]

        # We don't actually need the inverse of E, what we really need
        # is C*E.inverse(), and that can be found by setting
        #
        #   C*E.inverse() == X  <====>  XE == C.
        #
        # The latter can be found much more easily by solving a system.
        # Note: I do not actually know that sage solves the system more
        # intelligently, but this is still The Right Thing To Do.
        CE_inverse = E.solve_left(C)

        schur_complement = B - (CE_inverse*C.transpose())

        # Compute the Schur complement that we'll work on during
        # the following iteration, and store it back in the lower-
        # right-hand corner of "A".
        for i in range(n-k-2):
            for j in range(i+1):
                A[k+2+j,k+2+i] = schur_complement[j,i]
                A[k+2+i,k+2+j] = A[k+2+j,k+2+i] # keep it symmetric!

        # The on- and above-diagonal entries of "L" will be fixed
        # later, so we only need to worry about the lower-left entry
        # of the 2x2 identity matrix that belongs at the top of the
        # new column of "L".
        A[k+1,k] = 0
        for i in range(n-k-2):
            for j in range(2):
                # Store the new (k and (k+1)st) columns of "L" within
                # the lower-left-hand corner of "A", being sure to set
                # the lower-left entries from the upper-right ones to
                # avoid collisions.
                A[k+i+2,k+j] = CE_inverse[i,j]


        k += 2

    MS = A.matrix_space()
    P = MS.matrix(lambda i,j: p[j] == i)

    # Warning: when n == 0, this works, but returns a matrix
    # whose (nonexistent) entries are in ZZ rather than in
    # the base ring of P and L.
    D = block_diagonal_matrix(d)

    # Overwrite the diagonal and upper-right half of "A",
    # since we're about to return it as the unit-lower-
    # triangular "L".
    for i in range(n):
        A[i,i] = 1
        for j in range(i+1,n):
            A[i,j] = 0

    return (P,A,D)