From c687f64bed580a9f3be4b98b5188a42caaf27eba Mon Sep 17 00:00:00 2001
From: Michael Orlitzky
Date: Mon, 15 Jun 2015 14:54:10 -0400
Subject: [PATCH] Make basically_the_same() and rho() functions private.

Add examples/tests for basically_the_same().
Add linear isomorphism test for lyapunov_rank().
---
 mjo/cone/cone.py | 174 +++++++++++++++++++++++++++++++++++------------
 1 file changed, 129 insertions(+), 45 deletions(-)

diff --git a/mjo/cone/cone.py b/mjo/cone/cone.py
index e9d0f1e..e2c43d8 100644
--- a/mjo/cone/cone.py
+++ b/mjo/cone/cone.py
@@ -8,12 +8,55 @@ addsitedir(abspath('../../'))
 from sage.all import *
 
 
-def basically_the_same(K1,K2):
+def _basically_the_same(K1, K2):
     r"""
+    Test whether or not ``K1`` and ``K2`` are "basically the same."
+
+    This is a hack to get around the fact that it's difficult to tell
+    when two cones are linearly isomorphic. We have a proposition that
+    equates two cones, but when represented over `\mathbb{Q}`, they are
+    merely linearly isomorphic (not equal). So rather than test for
+    equality, we test a list of properties that should be preserved
+    under an invertible linear transformation.
+
+    OUTPUT:
+
     ``True`` if ``K1`` and ``K2`` are basically the same, and ``False``
-    otherwise. This is intended as a lazy way to check whether or not
-    ``K1`` and ``K2`` are linearly isomorphic (i.e. ``A(K1) == K2`` for
-    some invertible linear transformation ``A``).
+    otherwise.
+
+    EXAMPLES:
+
+    Any proper cone with three generators in `\mathbb{R}^{3}` is
+    basically the same as the nonnegative orthant::
+
+        sage: K1 = Cone([(1,0,0), (0,1,0), (0,0,1)])
+        sage: K2 = Cone([(1,2,3), (3, 18, 4), (66, 51, 0)])
+        sage: _basically_the_same(K1, K2)
+        True
+
+    Negating a cone gives you another cone that is basically the same::
+
+        sage: K = Cone([(0,2,-5), (-6, 2, 4), (0, 51, 0)])
+        sage: _basically_the_same(K, -K)
+        True
+
+    TESTS:
+
+    Any cone is basically the same as itself::
+
+        sage: K = random_cone(max_dim = 8)
+        sage: _basically_the_same(K, K)
+        True
+
+    After applying an invertible matrix to the rays of a cone, the
+    result should be basically the same as the cone we started with::
+
+        sage: K1 = random_cone(max_dim = 8)
+        sage: A = random_matrix(QQ, K1.lattice_dim(), algorithm='unimodular')
+        sage: K2 = Cone( [ A*r for r in K1.rays() ], lattice=K1.lattice())
+        sage: _basically_the_same(K1, K2)
+        True
+
     """
     if K1.lattice_dim() != K2.lattice_dim():
         return False
@@ -48,7 +91,7 @@ def basically_the_same(K1,K2):
 
 
 
-def rho(K, K2=None):
+def _rho(K, K2=None):
     r"""
     Restrict ``K`` into its own span, or the span of another cone.
 
@@ -64,18 +107,18 @@ def rho(K, K2=None):
     EXAMPLES::
 
         sage: K = Cone([(1,)])
-        sage: rho(K) == K
+        sage: _rho(K) == K
         True
 
        sage: K2 = Cone([(1,0)])
-        sage: rho(K2).rays()
+        sage: _rho(K2).rays()
         N(1)
         in 1-d lattice N
         sage: K3 = Cone([(1,0,0)])
-        sage: rho(K3).rays()
+        sage: _rho(K3).rays()
         N(1)
         in 1-d lattice N
-        sage: rho(K2) == rho(K3)
+        sage: _rho(K2) == _rho(K3)
         True
 
     TESTS:
@@ -84,7 +127,7 @@ def rho(K, K2=None):
 
         sage: set_random_seed()
         sage: K = random_cone(max_dim = 8)
-        sage: K_S = rho(K)
+        sage: K_S = _rho(K)
         sage: K_S.is_solid()
         True
 
@@ -93,7 +136,7 @@ def rho(K, K2=None):
 
         sage: set_random_seed()
         sage: K = random_cone(max_dim = 8)
-        sage: K_S = rho(K, K.dual() )
+        sage: K_S = _rho(K, K.dual() )
         sage: K_S.lattice_dim() == K.dual().dim()
         True
 
@@ -101,14 +144,14 @@ def rho(K, K2=None):
 
         sage: set_random_seed()
         sage: K = random_cone(max_dim = 8)
-        sage: K.dim() == rho(K).dim()
+        sage: K.dim() == _rho(K).dim()
         True
 
     Nor should it affect the lineality of a cone::
 
         sage: set_random_seed()
         sage: K = random_cone(max_dim = 8)
-        sage: K.lineality() == rho(K).lineality()
+        sage: K.lineality() == _rho(K).lineality()
         True
 
     No matter which space we restrict to, the lineality should not
@@ -116,20 +159,20 @@ def rho(K, K2=None):
 
         sage: set_random_seed()
         sage: K = random_cone(max_dim = 8)
-        sage: K.lineality() >= rho(K).lineality()
+        sage: K.lineality() >= _rho(K).lineality()
         True
-        sage: K.lineality() >= rho(K, K.dual()).lineality()
+        sage: K.lineality() >= _rho(K, K.dual()).lineality()
         True
 
     If we do this according to our paper, then the result is proper::
 
         sage: set_random_seed()
         sage: K = random_cone(max_dim = 8, strictly_convex=False, solid=False)
-        sage: K_S = rho(K)
-        sage: K_SP = rho(K_S.dual()).dual()
+        sage: K_S = _rho(K)
+        sage: K_SP = _rho(K_S.dual()).dual()
         sage: K_SP.is_proper()
         True
-        sage: K_SP = rho(K_S, K_S.dual())
+        sage: K_SP = _rho(K_S, K_S.dual())
         sage: K_SP.is_proper()
         True
 
@@ -137,11 +180,11 @@ def rho(K, K2=None):
 
         sage: set_random_seed()
        sage: K = random_cone(max_dim = 8, strictly_convex=True, solid=False)
-        sage: K_S = rho(K)
-        sage: K_SP = rho(K_S.dual()).dual()
+        sage: K_S = _rho(K)
+        sage: K_SP = _rho(K_S.dual()).dual()
         sage: K_SP.is_proper()
         True
-        sage: K_SP = rho(K_S, K_S.dual())
+        sage: K_SP = _rho(K_S, K_S.dual())
         sage: K_SP.is_proper()
         True
 
@@ -149,11 +192,11 @@ def rho(K, K2=None):
 
         sage: set_random_seed()
         sage: K = random_cone(max_dim = 8, strictly_convex=False, solid=True)
-        sage: K_S = rho(K)
-        sage: K_SP = rho(K_S.dual()).dual()
+        sage: K_S = _rho(K)
+        sage: K_SP = _rho(K_S.dual()).dual()
         sage: K_SP.is_proper()
         True
-        sage: K_SP = rho(K_S, K_S.dual())
+        sage: K_SP = _rho(K_S, K_S.dual())
         sage: K_SP.is_proper()
         True
 
@@ -161,24 +204,24 @@ def rho(K, K2=None):
 
         sage: set_random_seed()
         sage: K = random_cone(max_dim = 8, strictly_convex=True, solid=True)
-        sage: K_S = rho(K)
-        sage: K_SP = rho(K_S.dual()).dual()
+        sage: K_S = _rho(K)
+        sage: K_SP = _rho(K_S.dual()).dual()
         sage: K_SP.is_proper()
         True
-        sage: K_SP = rho(K_S, K_S.dual())
+        sage: K_SP = _rho(K_S, K_S.dual())
         sage: K_SP.is_proper()
         True
 
-    Test the proposition in our paper concerning the duals and
+    Test Proposition 7 in our paper concerning the duals and
     restrictions. Generate a random cone, then create a subcone of it.
     The operation of dual-taking should then commute with rho::
 
         sage: set_random_seed()
         sage: J = random_cone(max_dim = 8, solid=False, strictly_convex=False)
         sage: K = Cone(random_sublist(J.rays(), 0.5), lattice=J.lattice())
-        sage: K_W = rho(K, J)
-        sage: K_star_W_star = rho(K.dual(), J).dual()
-        sage: basically_the_same(K_W, K_star_W_star)
+        sage: K_W_star = _rho(K, J).dual()
+        sage: K_star_W = _rho(K.dual(), J)
+        sage: _basically_the_same(K_W_star, K_star_W)
         True
 
     ::
@@ -186,9 +229,9 @@ def rho(K, K2=None):
         sage: set_random_seed()
         sage: J = random_cone(max_dim = 8, solid=True, strictly_convex=False)
         sage: K = Cone(random_sublist(J.rays(), 0.5), lattice=J.lattice())
-        sage: K_W = rho(K, J)
-        sage: K_star_W_star = rho(K.dual(), J).dual()
-        sage: basically_the_same(K_W, K_star_W_star)
+        sage: K_W_star = _rho(K, J).dual()
+        sage: K_star_W = _rho(K.dual(), J)
+        sage: _basically_the_same(K_W_star, K_star_W)
         True
 
     ::
@@ -196,9 +239,9 @@ def rho(K, K2=None):
         sage: set_random_seed()
         sage: J = random_cone(max_dim = 8, solid=False, strictly_convex=True)
         sage: K = Cone(random_sublist(J.rays(), 0.5), lattice=J.lattice())
-        sage: K_W = rho(K, J)
-        sage: K_star_W_star = rho(K.dual(), J).dual()
-        sage: basically_the_same(K_W, K_star_W_star)
+        sage: K_W_star = _rho(K, J).dual()
+        sage: K_star_W = _rho(K.dual(), J)
+        sage: _basically_the_same(K_W_star, K_star_W)
         True
 
     ::
@@ -206,9 +249,9 @@ def rho(K, K2=None):
         sage: set_random_seed()
         sage: J = random_cone(max_dim = 8, solid=True, strictly_convex=True)
         sage: K = Cone(random_sublist(J.rays(), 0.5), lattice=J.lattice())
-        sage: K_W = rho(K, J)
-        sage: K_star_W_star = rho(K.dual(), J).dual()
-        sage: basically_the_same(K_W, K_star_W_star)
+        sage: K_W_star = _rho(K, J).dual()
+        sage: K_star_W = _rho(K.dual(), J)
+        sage: _basically_the_same(K_W_star, K_star_W)
         True
 
     """
@@ -585,6 +628,47 @@ def lyapunov_rank(K):
         sage: lyapunov_rank(K) == lyapunov_rank(K1) + lyapunov_rank(K2)
         True
 
+    The Lyapunov rank is invariant under a linear isomorphism
+    [Orlitzky/Gowda]_::
+
+        sage: K1 = random_cone(max_dim = 8)
+        sage: A = random_matrix(QQ, K1.lattice_dim(), algorithm='unimodular')
+        sage: K2 = Cone( [ A*r for r in K1.rays() ], lattice=K1.lattice())
+        sage: lyapunov_rank(K1) == lyapunov_rank(K2)
+        True
+
+    Just to be sure, test a few more::
+
+        sage: K1 = random_cone(max_dim=8, strictly_convex=True, solid=True)
+        sage: A = random_matrix(QQ, K1.lattice_dim(), algorithm='unimodular')
+        sage: K2 = Cone( [ A*r for r in K1.rays() ], lattice=K1.lattice())
+        sage: lyapunov_rank(K1) == lyapunov_rank(K2)
+        True
+
+    ::
+
+        sage: K1 = random_cone(max_dim=8, strictly_convex=True, solid=False)
+        sage: A = random_matrix(QQ, K1.lattice_dim(), algorithm='unimodular')
+        sage: K2 = Cone( [ A*r for r in K1.rays() ], lattice=K1.lattice())
+        sage: lyapunov_rank(K1) == lyapunov_rank(K2)
+        True
+
+    ::
+
+        sage: K1 = random_cone(max_dim=8, strictly_convex=False, solid=True)
+        sage: A = random_matrix(QQ, K1.lattice_dim(), algorithm='unimodular')
+        sage: K2 = Cone( [ A*r for r in K1.rays() ], lattice=K1.lattice())
+        sage: lyapunov_rank(K1) == lyapunov_rank(K2)
+        True
+
+    ::
+
+        sage: K1 = random_cone(max_dim=8, strictly_convex=False, solid=False)
+        sage: A = random_matrix(QQ, K1.lattice_dim(), algorithm='unimodular')
+        sage: K2 = Cone( [ A*r for r in K1.rays() ], lattice=K1.lattice())
+        sage: lyapunov_rank(K1) == lyapunov_rank(K2)
+        True
+
     The dual cone `K^{*}` of ``K`` should have the same Lyapunov rank as
     ``K`` itself [Rudolf et al.]_::
 
@@ -652,8 +736,8 @@ def lyapunov_rank(K):
         sage: set_random_seed()
         sage: K = random_cone(max_dim=8)
         sage: actual = lyapunov_rank(K)
-        sage: K_S = rho(K)
-        sage: K_SP = rho(K_S.dual()).dual()
+        sage: K_S = _rho(K)
+        sage: K_SP = _rho(K_S.dual()).dual()
         sage: l = K.lineality()
         sage: c = K.codim()
         sage: expected = lyapunov_rank(K_SP) + K.dim()*(l + c) + c**2
@@ -707,7 +791,7 @@ def lyapunov_rank(K):
 
     if m < n:
         # K is not solid, restrict to its span.
-        K = rho(K)
+        K = _rho(K)
 
         # Lemma 2
         beta += m*(n - m) + (n - m)**2
@@ -715,8 +799,8 @@ def lyapunov_rank(K):
     if l > 0:
         # K is not pointed, restrict to the span of its dual. Uses a
         # proposition from our paper, i.e. this is equivalent to K =
-        # rho(K.dual()).dual().
-        K = rho(K, K.dual())
+        # _rho(K.dual()).dual().
+        K = _rho(K, K.dual())
 
         # Lemma 3
         beta += m * l
-- 
2.44.2
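
For reference, a minimal interactive sketch of the property that the new
doctests exercise. It assumes this patch has been applied and that
``mjo.cone.cone`` can be imported from a Sage session (that import path is
an assumption; the module's own doctests need no import). The construction
of ``A`` and ``K2`` is copied from the new tests::

    sage: from mjo.cone.cone import lyapunov_rank, _basically_the_same
    sage: K1 = Cone([(1,0,0), (0,1,0), (0,0,1)])   # nonnegative orthant
    sage: A = random_matrix(QQ, K1.lattice_dim(), algorithm='unimodular')
    sage: K2 = Cone([ A*r for r in K1.rays() ], lattice=K1.lattice())
    sage: _basically_the_same(K1, K2)              # K2 = A(K1), A invertible
    True
    sage: lyapunov_rank(K1) == lyapunov_rank(K2)   # rank is isomorphism-invariant
    True

Since ``A`` is invertible, these are exactly the two invariance checks added
to the ``_basically_the_same()`` and ``lyapunov_rank()`` doctests above.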