# we see in things like x = 1*e1 + 2*e2.
vector_basis = basis
- from sage.structure.element import is_Matrix
- basis_is_matrices = False
-
degree = 0
if n > 0:
- if is_Matrix(basis[0]):
- if basis[0].is_square():
- # TODO: this ugly is_square() hack works around the problem
- # of passing to_matrix()ed vectors in as the basis from a
- # subalgebra. They aren't REALLY matrices, at least not of
- # the type that we assume here... Ugh.
- basis_is_matrices = True
- from mjo.eja.eja_utils import _vec2mat
- vector_basis = tuple( map(_mat2vec,basis) )
- degree = basis[0].nrows()**2
- else:
- # convert from column matrices to vectors, yuck
- basis = tuple( map(_mat2vec,basis) )
- vector_basis = basis
- degree = basis[0].degree()
- else:
- degree = basis[0].degree()
+ # Works on both column and square matrices...
+ degree = len(basis[0].list())
- # Build an ambient space that fits...
+ # Build an ambient space that fits our matrix basis when
+ # written out as "long vectors."
V = VectorSpace(field, degree)
- # We overwrite the name "vector_basis" in a second, but never modify it
- # in place, to this effectively makes a copy of it.
- deortho_vector_basis = vector_basis
+ # The matrix that will hold the orthonormal -> unorthonormal
+ # coordinate transformation.
self._deortho_matrix = None
if orthonormalize:
- from mjo.eja.eja_utils import gram_schmidt
- if basis_is_matrices:
- vector_ip = lambda x,y: inner_product(_vec2mat(x), _vec2mat(y))
- vector_basis = gram_schmidt(vector_basis, vector_ip)
- else:
- vector_basis = gram_schmidt(vector_basis, inner_product)
+ # Save a copy of the un-orthonormalized basis for later.
+ # Convert it to ambient V (vector) coordinates while we're
+ # at it, because we'd have to do it later anyway.
+ deortho_vector_basis = tuple( V(b.list()) for b in basis )
- # Normalize the "matrix" basis, too!
- basis = vector_basis
-
- if basis_is_matrices:
- basis = tuple( map(_vec2mat,basis) )
+ from mjo.eja.eja_utils import gram_schmidt
+ basis = gram_schmidt(basis, inner_product)
- # Save the matrix "basis" for later... this is the last time we'll
- # reference it in this constructor.
- if basis_is_matrices:
- self._matrix_basis = basis
- else:
- MS = MatrixSpace(self.base_ring(), degree, 1)
- self._matrix_basis = tuple( MS(b) for b in basis )
+ # Save the (possibly orthonormalized) matrix basis for
+ # later...
+ self._matrix_basis = basis
- # Now create the vector space for the algebra...
+ # Now create the vector space for the algebra, which will have
+ # its own set of non-ambient coordinates (in terms of the
+ # supplied basis).
+ vector_basis = tuple( V(b.list()) for b in basis )
W = V.span_of_basis( vector_basis, check=check_axioms)
if orthonormalize:
# Now we actually compute the multiplication and inner-product
# tables/matrices using the possibly-orthonormalized basis.
self._inner_product_matrix = matrix.zero(field, n)
- self._multiplication_table = [ [0 for j in range(i+1)] for i in range(n) ]
+ self._multiplication_table = [ [0 for j in range(i+1)]
+ for i in range(n) ]
- print("vector_basis:")
- print(vector_basis)
# Note: the Jordan and inner-products are defined in terms
# of the ambient basis. It's important that their arguments
# are in ambient coordinates as well.
for i in range(n):
for j in range(i+1):
# ortho basis w.r.t. ambient coords
- q_i = vector_basis[i]
- q_j = vector_basis[j]
-
- if basis_is_matrices:
- q_i = _vec2mat(q_i)
- q_j = _vec2mat(q_j)
+ q_i = basis[i]
+ q_j = basis[j]
elt = jordan_product(q_i, q_j)
ip = inner_product(q_i, q_j)
- if basis_is_matrices:
- # do another mat2vec because the multiplication
- # table is in terms of vectors
- elt = _mat2vec(elt)
-
- # TODO: the jordan product turns things back into
- # matrices here even if they're supposed to be
- # vectors. ugh. Can we get rid of vectors all together
- # please?
- elt = W.coordinate_vector(elt)
+ # The jordan product returns a matrixy answer, so we
+ # have to convert it to the algebra coordinates.
+ elt = W.coordinate_vector(V(elt.list()))
self._multiplication_table[i][j] = self.from_vector(elt)
self._inner_product_matrix[i,j] = ip
self._inner_product_matrix[j,i] = ip
"""
def __init__(self, n, **kwargs):
- def jordan_product(x,y):
- P = x.parent()
- return P(tuple( xi*yi for (xi,yi) in zip(x,y) ))
- def inner_product(x,y):
- return x.inner_product(y)
+ if n == 0:
+ jordan_product = lambda x,y: x
+ inner_product = lambda x,y: x
+ else:
+ def jordan_product(x,y):
+ P = x.parent()
+ return P( xi*yi for (xi,yi) in zip(x,y) )
+
+ def inner_product(x,y):
+ return (x.T*y)[0,0]
# New defaults for keyword arguments. Don't orthonormalize
# because our basis is already orthonormal with respect to our
# inner product.
if "orthonormalize" not in kwargs: kwargs["orthonormalize"] = False
if "check_axioms" not in kwargs: kwargs["check_axioms"] = False
-
- standard_basis = FreeModule(ZZ, n).basis()
- super(HadamardEJA, self).__init__(standard_basis,
- jordan_product,
- inner_product,
- **kwargs)
+ column_basis = tuple( b.column() for b in FreeModule(ZZ, n).basis() )
+ super().__init__(column_basis, jordan_product, inner_product, **kwargs)
self.rank.set_cache(n)
if n == 0:
def __init__(self, elt, orthonormalize=True, **kwargs):
superalgebra = elt.parent()
+ # TODO: going up to the superalgebra dimension here is
+ # overkill. We should append p vectors as rows to a matrix
+ # and continually rref() it until the rank stops going
+ # up. When n=10 but the dimension of the algebra is 1, that
+ # can save a shitload of time (especially over AA).
powers = tuple( elt**k for k in range(superalgebra.dimension()) )
power_vectors = ( p.to_vector() for p in powers )
P = matrix(superalgebra.base_ring(), power_vectors)
- if orthonormalize:
- basis = powers # let god sort 'em out
- else:
- # Echelonize the matrix ourselves, because otherwise the
- # call to P.pivot_rows() below can choose a non-optimal
- # row-reduction algorithm. In particular, scaling can
- # help over AA because it avoids the RecursionError that
- # gets thrown when we have to look too hard for a root.
- #
- # Beware: QQ supports an entirely different set of "algorithm"
- # keywords than do AA and RR.
- algo = None
- if superalgebra.base_ring() is not QQ:
- algo = "scaled_partial_pivoting"
- P.echelonize(algorithm=algo)
-
- # In this case, we just need to figure out which elements
- # of the "powers" list are redundant... First compute the
- # vector subspace spanned by the powers of the given
- # element.
-
- # Figure out which powers form a linearly-independent set.
- ind_rows = P.pivot_rows()
-
- # Pick those out of the list of all powers.
- basis = tuple(map(powers.__getitem__, ind_rows))
+ # Echelonize the matrix ourselves, because otherwise the
+ # call to P.pivot_rows() below can choose a non-optimal
+ # row-reduction algorithm. In particular, scaling can
+ # help over AA because it avoids the RecursionError that
+ # gets thrown when we have to look too hard for a root.
+ #
+ # Beware: QQ supports an entirely different set of "algorithm"
+ # keywords than do AA and RR.
+ algo = None
+ if superalgebra.base_ring() is not QQ:
+ algo = "scaled_partial_pivoting"
+ P.echelonize(algorithm=algo)
+
+ # Figure out which powers form a linearly-independent set.
+ ind_rows = P.pivot_rows()
+
+ # Pick those out of the list of all powers.
+ basis = tuple(map(powers.__getitem__, ind_rows))
super().__init__(superalgebra,