X-Git-Url: http://gitweb.michael.orlitzky.com/?a=blobdiff_plain;f=mjo%2Fcone%2Fcone.py;h=a5482b3aa95f7198938007c4be615c4e7a97e17d;hb=b97553aaaf9734644bee13bf484014f817456b26;hp=2296e3fc010db71091e530b211c73ada05962f81;hpb=3b659b1d0440daf3ff7bd8cf3cf53f90523a1609;p=sage.d.git diff --git a/mjo/cone/cone.py b/mjo/cone/cone.py index 2296e3f..a5482b3 100644 --- a/mjo/cone/cone.py +++ b/mjo/cone/cone.py @@ -8,82 +8,120 @@ addsitedir(abspath('../../')) from sage.all import * -def random_cone(min_dim=None, max_dim=None, min_rays=None, max_rays=None): +def discrete_complementarity_set(K): r""" - Generate a random rational convex polyhedral cone. + Compute the discrete complementarity set of this cone. - Lower and upper bounds may be provided for both the dimension of the - ambient space and the number of generating rays of the cone. Any - parameters left unspecified will be chosen randomly. + The complementarity set of this cone is the set of all orthogonal + pairs `(x,s)` such that `x` is in this cone, and `s` is in its + dual. The discrete complementarity set restricts `x` and `s` to be + generators of their respective cones. - INPUT: + OUTPUT: + + A list of pairs `(x,s)` such that, - - ``min_dim`` (default: random) -- The minimum dimension of the ambient - lattice. + * `x` is in this cone. + * `x` is a generator of this cone. + * `s` is in this cone's dual. + * `s` is a generator of this cone's dual. + * `x` and `s` are orthogonal. - - ``max_dim`` (default: random) -- The maximum dimension of the ambient - lattice. + EXAMPLES: - - ``min_rays`` (default: random) -- The minimum number of generating rays - of the cone. + The discrete complementarity set of the nonnegative orthant consists + of pairs of standard basis vectors:: - - ``max_rays`` (default: random) -- The maximum number of generating rays - of the cone. + sage: K = Cone([(1,0),(0,1)]) + sage: discrete_complementarity_set(K) + [((1, 0), (0, 1)), ((0, 1), (1, 0))] - OUTPUT: + If the cone consists of a single ray, the second components of the + discrete complementarity set should generate the orthogonal + complement of that ray:: + + sage: K = Cone([(1,0)]) + sage: discrete_complementarity_set(K) + [((1, 0), (0, 1)), ((1, 0), (0, -1))] + sage: K = Cone([(1,0,0)]) + sage: discrete_complementarity_set(K) + [((1, 0, 0), (0, 1, 0)), + ((1, 0, 0), (0, -1, 0)), + ((1, 0, 0), (0, 0, 1)), + ((1, 0, 0), (0, 0, -1))] - A new, randomly generated cone. + When the cone is the entire space, its dual is the trivial cone, so + the discrete complementarity set is empty:: + + sage: K = Cone([(1,0),(-1,0),(0,1),(0,-1)]) + sage: discrete_complementarity_set(K) + [] TESTS: - It's hard to test the output of a random process, but we can at - least make sure that we get a cone back:: + The complementarity set of the dual can be obtained by switching the + components of the complementarity set of the original cone:: - sage: from sage.geometry.cone import is_Cone - sage: K = random_cone() - sage: is_Cone(K) # long time + sage: K1 = random_cone(max_dim=10, max_rays=10) + sage: K2 = K1.dual() + sage: expected = [(x,s) for (s,x) in discrete_complementarity_set(K2)] + sage: actual = discrete_complementarity_set(K1) + sage: actual == expected True """ + V = K.lattice().vector_space() - def random_min_max(l,u): - r""" - We need to handle four cases to prevent us from doing - something stupid like having an upper bound that's lower than - our lower bound. And we would need to repeat all of that logic - for the dimension/rays, so we consolidate it here. 
-        """
-        if l is None and u is None:
-            # They're both random, just return a random nonnegative
-            # integer.
-            return ZZ.random_element().abs()
+    # Convert the rays to vectors so that we can compute inner
+    # products.
+    xs = [V(x) for x in K.rays()]
+    ss = [V(s) for s in K.dual().rays()]

-        if l is not None and u is not None:
-            # Both were specified. Again, just make up a number and
-            # return it. If the user wants to give us u < l then he
-            # can have an exception.
-            return ZZ.random_element(l,u)
+    return [(x,s) for x in xs for s in ss if x.inner_product(s) == 0]

-        if l is not None and u is None:
-            # In this case, we're generating the upper bound randomly
-            # GIVEN A LOWER BOUND. So we add a random nonnegative
-            # integer to the given lower bound.
-            u = l + ZZ.random_element().abs()
-            return ZZ.random_element(l,u)

-        # Here we must be in the only remaining case, where we are
-        # given an upper bound but no lower bound. We might as well
-        # use zero.
-        return ZZ.random_element(0,u)
+def LL(K):
+    r"""
+    Compute the space `\mathbf{LL}` of all Lyapunov-like transformations
+    on this cone.

-    d = random_min_max(min_dim, max_dim)
-    r = random_min_max(min_rays, max_rays)
+    OUTPUT:

-    L = ToricLattice(d)
-    rays = [L.random_element() for i in range(0,r)]
+    A list of matrices forming a basis for the space of all
+    Lyapunov-like transformations on this cone.
+
+    """
+    V = K.lattice().vector_space()
+
+    C_of_K = discrete_complementarity_set(K)
+
+    matrices = [x.tensor_product(s) for (x,s) in C_of_K]
+
+    # Sage doesn't think matrices are vectors, so we have to convert
+    # our matrices to vectors explicitly before we can figure out how
+    # many are linearly-independent.
+    #
+    # The space W has the same base ring as V, but dimension
+    # dim(V)^2. So it has the same dimension as the space of linear
+    # transformations on V. In other words, it's just the right size
+    # to create an isomorphism between it and our matrices.
+    W = VectorSpace(V.base_ring(), V.dimension()**2)
+
+    # Turn our matrices into long vectors...
+    vectors = [ W(m.list()) for m in matrices ]
+
+    # Vector space representation of Lyapunov-like matrices
+    # (i.e. vec(L) where L is Lyapunov-like).
+    LL_vector = W.span(vectors).complement()
+
+    # Now construct an ambient MatrixSpace in which to stick our
+    # transformations.
+    M = MatrixSpace(V.base_ring(), V.dimension())
+
+    matrices = [ M(v.list()) for v in LL_vector.basis() ]
+
+    return matrices

-    # We pass the lattice in case there are no rays.
-    return Cone(rays, lattice=L)


 def lyapunov_rank(K):
@@ -129,17 +167,18 @@ def lyapunov_rank(K):

     REFERENCES:

-    1. M.S. Gowda and J. Tao. On the bilinearity rank of a proper cone
-       and Lyapunov-like transformations, Mathematical Programming, 147
+    .. [Gowda/Tao] M.S. Gowda and J. Tao. On the bilinearity rank of a proper
+       cone and Lyapunov-like transformations, Mathematical Programming, 147
        (2014) 155-170.

-    2. G. Rudolf, N. Noyan, D. Papp, and F. Alizadeh, Bilinear
+    .. [Rudolf et al.] G. Rudolf, N. Noyan, D. Papp, and F. Alizadeh, Bilinear
        optimality constraints for the cone of positive polynomials,
        Mathematical Programming, Series B, 129 (2011) 5-31.
EXAMPLES: - The nonnegative orthant in `\mathbb{R}^{n}` always has rank `n`:: + The nonnegative orthant in `\mathbb{R}^{n}` always has rank `n` + [Rudolf et al.]_:: sage: positives = Cone([(1,)]) sage: lyapunov_rank(positives) @@ -147,23 +186,25 @@ def lyapunov_rank(K): sage: quadrant = Cone([(1,0), (0,1)]) sage: lyapunov_rank(quadrant) 2 - sage: octant = Cone([(1,0,0), (0,1,0), (0,0,1)]) + sage: octant = Cone([(1,0,0), (0,1,0), (0,0,1)]) sage: lyapunov_rank(octant) 3 - The `L^{3}_{1}` cone is known to have a Lyapunov rank of one:: + The `L^{3}_{1}` cone is known to have a Lyapunov rank of one + [Rudolf et al.]_:: sage: L31 = Cone([(1,0,1), (0,-1,1), (-1,0,1), (0,1,1)]) sage: lyapunov_rank(L31) 1 - Likewise for the `L^{3}_{\infty}` cone:: + Likewise for the `L^{3}_{\infty}` cone [Rudolf et al.]_:: sage: L3infty = Cone([(0,1,1), (1,0,1), (0,-1,1), (-1,0,1)]) sage: lyapunov_rank(L3infty) 1 - The Lyapunov rank should be additive on a product of cones:: + The Lyapunov rank should be additive on a product of cones + [Rudolf et al.]_:: sage: L31 = Cone([(1,0,1), (0,-1,1), (-1,0,1), (0,1,1)]) sage: octant = Cone([(1,0,0), (0,1,0), (0,0,1)]) @@ -171,8 +212,8 @@ def lyapunov_rank(K): sage: lyapunov_rank(K) == lyapunov_rank(L31) + lyapunov_rank(octant) True - Two isomorphic cones should have the same Lyapunov rank. The cone - ``K`` in the following example is isomorphic to the nonnegative + Two isomorphic cones should have the same Lyapunov rank [Rudolf et al.]_. + The cone ``K`` in the following example is isomorphic to the nonnegative octant in `\mathbb{R}^{3}`:: sage: K = Cone([(1,2,3), (-1,1,0), (1,0,6)]) @@ -180,7 +221,7 @@ def lyapunov_rank(K): 3 The dual cone `K^{*}` of ``K`` should have the same Lyapunov rank as ``K`` - itself:: + itself [Rudolf et al.]_:: sage: K = Cone([(2,2,4), (-1,9,0), (2,0,6)]) sage: lyapunov_rank(K) == lyapunov_rank(K.dual()) @@ -188,32 +229,41 @@ def lyapunov_rank(K): TESTS: - The Lyapunov rank should be additive on a product of cones:: + The Lyapunov rank should be additive on a product of cones + [Rudolf et al.]_:: - sage: K1 = random_cone(0,10,0,10) - sage: K2 = random_cone(0,10,0,10) + sage: K1 = random_cone(max_dim=10, max_rays=10) + sage: K2 = random_cone(max_dim=10, max_rays=10) sage: K = K1.cartesian_product(K2) sage: lyapunov_rank(K) == lyapunov_rank(K1) + lyapunov_rank(K2) True The dual cone `K^{*}` of ``K`` should have the same Lyapunov rank as ``K`` - itself:: + itself [Rudolf et al.]_:: - sage: K = random_cone(0,10,0,10) + sage: K = random_cone(max_dim=10, max_rays=10) sage: lyapunov_rank(K) == lyapunov_rank(K.dual()) True + The Lyapunov rank of a proper polyhedral cone in `n` dimensions can + be any number between `1` and `n` inclusive, excluding `n-1` + [Gowda/Tao]_ (by accident, this holds for the trivial cone in a + trivial space as well):: + + sage: K = random_cone(max_dim=10, strictly_convex=True, solid=True) + sage: b = lyapunov_rank(K) + sage: n = K.lattice_dim() + sage: 1 <= b and b <= n + True + sage: b == n-1 + False + """ V = K.lattice().vector_space() - xs = [V(x) for x in K.rays()] - ss = [V(s) for s in K.dual().rays()] - - # WARNING: This isn't really C(K), it only contains the pairs - # (x,s) in C(K) where x,s are extreme in their respective cones. 
- C_of_K = [(x,s) for x in xs for s in ss if x.inner_product(s) == 0] + C_of_K = discrete_complementarity_set(K) - matrices = [x.column() * s.row() for (x,s) in C_of_K] + matrices = [x.tensor_product(s) for (x,s) in C_of_K] # Sage doesn't think matrices are vectors, so we have to convert # our matrices to vectors explicitly before we can figure out how
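
``LL`` and ``lyapunov_rank`` are built from the same span of tensor products
`x \otimes s` taken over the discrete complementarity set, so the basis
returned by ``LL(K)`` should contain exactly ``lyapunov_rank(K)`` matrices,
each of which should satisfy the Lyapunov-like condition
`\langle Lx, s \rangle = 0` for every pair `(x,s)` in that set. A minimal
sanity check (a sketch only; it assumes the functions defined in this file
are available in the current Sage session, just as the doctests above
assume)::

    sage: K = Cone([(1,0), (0,1)])
    sage: basis = LL(K)
    sage: len(basis) == lyapunov_rank(K)
    True
    sage: all((L*x).inner_product(s) == 0
    ....:     for L in basis
    ....:     for (x,s) in discrete_complementarity_set(K))
    True

For the nonnegative quadrant, the basis should consist of the two diagonal
matrix units, which is consistent with the Lyapunov rank of `2` reported for
the quadrant in the examples above.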