X-Git-Url: http://gitweb.michael.orlitzky.com/?a=blobdiff_plain;f=src%2FLinearProgramming.py;h=ceb2e5fca184f5825720406ef3db6c7a30c6cad4;hb=9e873928a42a91b468e56bf95217f8504c12dad2;hp=1d72ad5e6522da0f75cac6cd4a54533d6cafa6f3;hpb=1803f36d3cc39a3f11bffccacf7447174ca23f43;p=dead%2Fcensus-tools.git diff --git a/src/LinearProgramming.py b/src/LinearProgramming.py index 1d72ad5..ceb2e5f 100644 --- a/src/LinearProgramming.py +++ b/src/LinearProgramming.py @@ -3,6 +3,8 @@ Classes to create, solve, and make dinner for linear programs. Handles integration with lp_solve. """ +import fractions +from numpy import * import os import site import sys @@ -10,35 +12,478 @@ import sys # Add LP_SOLVE_PATH to our path. There is no point to this variable # other than to make the site.addsitedir() line fit within 80 # characters. -LP_SOLVE_PATH = '/../lib/lp_solve' -site.addsitedir(os.path.dirname(os.path.abspath(sys.argv[0])) + LP_SOLVE_PATH) +LP_SOLVE_PATH1 = '/../../lib/lp_solve' +LP_SOLVE_PATH2 = '/../lib/lp_solve' +site.addsitedir(os.path.dirname(os.path.abspath(sys.argv[0])) + LP_SOLVE_PATH1) +site.addsitedir(os.path.dirname(os.path.abspath(sys.argv[0])) + LP_SOLVE_PATH2) -from lp_solve import * +from lpsolve55 import * -# Constants denoting the three different types of (in)equalities. -# These are defined by lp_solve. -LEQ = -1 # Less than or equal to. -EQ = 0 # Equal to -GEQ = 1 # Greater than or Equal to. +# Constants representing the two types of linear programs. +# MINIMIZE means that we would like to minimize the objective +# function, and MAXIMIZE means that we would like to maximize it. +MINIMIZE = 0 +MAXIMIZE = 1 class LinearProgram(object): """ - Represents an instance of an lp_solve linear program. + Represents an instance of an lp_solve linear program. + The actual lp_solve linear program is only created when it + is needed, and modifications to it are cached beforehand. """ - + + + def get_row_count(self): + """ + Return the number of rows in the constraint matrix. + """ + return len(self.constraint_matrix) + + + def get_column_count(self): + """ + Return the number of columns in the constraint matrix. + If we don't have any rows yet, claim zero columns as well. + """ + if self.get_row_count() == 0: + return 0 + else: + return len(self.constraint_matrix[0]) + + + + @property + def type(self): + """ + A property representing the type of linear program, either + MINIMIZE or MAXIMIZE. 
+        """
+        return self._type
+
+    @type.setter
+    def type(self, type):
+        if type == MINIMIZE:
+            self._type = MINIMIZE
+            if self._lp != None:
+                lpsolve('set_minim', self._lp)
+        else:
+            self._type = MAXIMIZE
+            if self._lp != None:
+                lpsolve('set_maxim', self._lp)
+
+
+
+    @property
+    def objective_coefficients(self):
+        return self._objective_coefficients
+
+
+    @objective_coefficients.setter
+    def objective_coefficients(self, value):
+        self._objective_coefficients = value
+
+        if self._lp != None:
+            lpsolve('set_obj_fn',
+                    self._lp,
+                    self._objective_coefficients)
+
+
+
+    @property
+    def constraint_matrix(self):
+        return self._constraint_matrix
+
+    @constraint_matrix.setter
+    def constraint_matrix(self, value):
+        self._constraint_matrix = value
+
+        if self._lp != None:
+            lpsolve('set_mat', self._lp, value)
+
+
+
+    @property
+    def rhs(self):
+        return self._rhs
+
+    @rhs.setter
+    def rhs(self, value):
+        self._rhs = value
+
+        if self._lp != None:
+            lpsolve('set_rh_vec', self._lp, self._rhs)
+
+
+
+    @property
+    def inequalities(self):
+        return self._inequalities
+
+    @inequalities.setter
+    def inequalities(self, value):
+        self._inequalities = value
+
+        if self._lp != None:
+            for i in range(self.get_row_count()):
+                lpsolve('set_constr_type', self._lp, i+1, value[i])
+
+
+    @property
+    def solution_lower_bounds(self):
+        return self._solution_lower_bounds
+
+    @solution_lower_bounds.setter
+    def solution_lower_bounds(self, value):
+        if len(value) != self.get_column_count():
+            return
+
+        self._solution_lower_bounds = value
+
+        if self._lp != None:
+            for i in range(self.get_column_count()):
+                lpsolve('set_lowbo', self._lp, i+1, value[i])
+
+
+
+    @property
+    def solution_upper_bounds(self):
+        return self._solution_upper_bounds
+
+
+    @solution_upper_bounds.setter
+    def solution_upper_bounds(self, value):
+        if len(value) != self.get_column_count():
+            return
+
+        self._solution_upper_bounds = value
+
+        if self._lp != None:
+            for i in range(self.get_column_count()):
+                lpsolve('set_upbo', self._lp, i+1, value[i])
+
+
+
+    @property
+    def integer_variables(self):
+        """
+        A vector containing the indices of any solution variables
+        which must be integers.
+        """
+        return self._integer_variables
+
+    @integer_variables.setter
+    def integer_variables(self, value):
+        self._integer_variables = value
+
+        if self._lp != None:
+            for i in range(len(value)):
+                lpsolve('set_int', self._lp, value[i], 1)
+
+
+
+    def make_all_variables_integers(self):
+        """
+        Force all solution variables to be integers. This is achieved
+        by filling the integer_variables vector with all possible
+        indices.
+        """
+        ivs = []
+
+        for i in range(self.get_column_count()):
+            ivs.append(i+1)
+            if self._lp != None:
+                lpsolve('set_int', self._lp, i+1, 1)
+
+        self.integer_variables = ivs
+
+
+
+    @property
+    def scale_mode(self):
+        """
+        The scaling mode used for handling floating point numbers.
+        See the lp_solve set_scaling documentation for more
+        information.
+        """
+        return self._scale_mode
+
+
+    @scale_mode.setter
+    def scale_mode(self, value):
+        self._scale_mode = value
+
+        if self._lp != None:
+            lpsolve('set_scaling', self._lp, value)
+
+
+
+    def print_tableau(self):
+        """
+        Tell lp_solve to print its simplex tableau. Only works after
+        a successful call to solve().
+        """
+        lpsolve('set_outputfile', self._lp, '')
+        lpsolve('print_tableau', self._lp)
+
+
     def __init__(self):
-        self.objective_function_coefficients = []
-        self.constraint_matrix = []
-        self.rhs = []
-        self.inequalities = []
+        """
+        Initialize the object, setting all of the properties
+        either empty or to sane defaults.
+
+        The _lp variable is set to None, initially. All of the
+        property setters will test for _lp == None, and will refuse
+        to make calls to lp_solve if that is the case. A new instance
+        of an lp_solve linear program will be created (and stored in
+        the _lp variable) on demand.
+
+        If the _lp variable is *not* None, the property setters will
+        make calls to lp_solve, updating the pre-existing linear program.
+        """
+
+        self._lp = None
+        self._objective_coefficients = []
+        self._constraint_matrix = []
+        self._rhs = []
+        self._inequalities = []
+        self._integer_variables = []
+        self._solution_lower_bounds = []
+        self._solution_upper_bounds = []
+        self._scale_mode = 0
+        self._type = MINIMIZE
+
+
+    def set_all_lp_properties(self):
+        """
+        Re-set all linear program properties. After a new linear
+        program is created, it will be 'empty'. We already have
+        its properties stored in our member variables; however,
+        we need to make calls to lp_solve to set them on the new
+        linear program instance.
+
+        All of the property setters will check for the existence of
+        self._lp and make calls to lp_solve as necessary. So, to set
+        all of our properties, we just have to trigger the property
+        setters a second time.
+        """
+        self.constraint_matrix = self.constraint_matrix
+        self.rhs = self.rhs
+        self.objective_coefficients = self.objective_coefficients
+        self.inequalities = self.inequalities
+        self.integer_variables = self.integer_variables
+        self.solution_lower_bounds = self.solution_lower_bounds
+        self.solution_upper_bounds = self.solution_upper_bounds
+        self.scale_mode = self.scale_mode
+        self.type = self.type
+
+
+
+    def delete(self):
+        if self._lp != None:
+            lpsolve('delete_lp', self._lp)
+
+
+
+    def create_lp_if_necessary(self):
+        """
+        If we already have a linear program instance, do nothing.
+        Otherwise, create one, and set all of the necessary properties.
+        """
+        if self._lp != None:
+            return
+
+        self._lp = lpsolve('make_lp',
+                           self.get_row_count(),
+                           self.get_column_count())
+
+        # This is not critical, but it will encourage lp_solve to
+        # warn us about potential problems.
+        lpsolve('set_verbose', self._lp, IMPORTANT)
+
+        self.set_all_lp_properties()
+
+
     def solve(self):
-        [v,x,duals] = lp_solve(self.objective_function_coefficients,
-                               self.constraint_matrix,
-                               self.rhs,
-                               self.inequalities)
-        return [v,x,duals]
+        """
+        Solve the linear program. The lp_solve instance is
+        created beforehand if necessary.
+        """
+        self.create_lp_if_necessary()
+        result = lpsolve('solve', self._lp)
+
+        # Default to empty return values.
+        obj = []
+        x = []
+        duals = []
+
+        # See http://lpsolve.sourceforge.net/5.5/solve.htm for a
+        # description of these constants.
+        if (result == OPTIMAL or
+            result == SUBOPTIMAL or
+            result == PROCBREAK or
+            result == FEASFOUND):
+
+            # If the result was "good," i.e. if get_solution will work,
+            # call it and use its return value as ours.
+            [obj, x, duals, ret] = lpsolve('get_solution', self._lp)
+
+        return [obj, x, duals]
+
+
+    def objective_coefficient_gcd(self):
+        """
+        Return the GCD of all objective function coefficients.
+        """
+        return reduce(fractions.gcd, self.objective_coefficients)
+
+
+
+class SimplexIteration(object):
+    """
+    Represents the 'current' iteration of the simplex method at some
+    point. It needs an A, b, x, and c corresponding to the linear
+    program in standard form. It can then determine whether or not the
+    current vertex (x) is optimal, and possibly move to a better one.
+    """
+
+    @property
+    def constraint_matrix(self):
+        return self._constraint_matrix.tolist()
+
+    @constraint_matrix.setter
+    def constraint_matrix(self, value):
+        self._constraint_matrix = matrix(value)
+
+    @property
+    def rhs(self):
+        return self._rhs
+
+    @rhs.setter
+    def rhs(self, value):
+        self._rhs = value
+
+    @property
+    def objective_coefficients(self):
+        return self._objective_coefficients
+
+    @objective_coefficients.setter
+    def objective_coefficients(self, value):
+        self._objective_coefficients = value
+
+    @property
+    def solution_vector(self):
+        return self._solution_vector.tolist()
+
+    @solution_vector.setter
+    def solution_vector(self, value):
+        self._solution_vector = array(value)
+
+
+    @property
+    def basic_variables(self):
+        # The current set of basic variables. Constructed from the
+        # "true" source, our list of basic indices.
+        idxs = self.basic_indices
+        return map(lambda x: "x" + str(x+1), idxs)
+
+
+    @basic_variables.setter
+    def basic_variables(self, value):
+        """
+        Syntactic sugar to set the basic indices. We take a string
+        of the form x1,x2,...xn, and subtract one from each of the
+        subscripts to get the basic indices.
+        """
+        basic_indices = []
+        vars = value.split(',')
+
+        for var in vars:
+            var = var.strip()
+            var = var.replace('x', '')
+            basic_indices.append(int(var)-1)
+
+        self.basic_indices = basic_indices
+
+
+    @property
+    def nonbasic_variables(self):
+        # The current set of nonbasic variables. Constructed from the
+        # nonbasic indices; these are the variables whose value is zero
+        # in the current basic solution.
+        idxs = self.nonbasic_indices
+        return map(lambda x: "x" + str(x+1), idxs)
+
+
+    @property
+    def basic_indices(self):
+        return self._basic_indices
+
+
+    @basic_indices.setter
+    def basic_indices(self, value):
+        self._basic_indices = value
+
+
+    @property
+    def nonbasic_indices(self):
+        all_indices = range(0, len(self.solution_vector))
+        return list(set(all_indices) - set(self.basic_indices))
+
+
+    @property
+    def optimal(self):
+        # True if the current solution is optimal, false if not.
+        for idx in self.nonbasic_indices:
+            if self.reduced_cost(idx) < 0.0:
+                return False
+
+        return True
+
+
+    def constraint_column(self, idx):
+        # Return the column of the constraint matrix corresponding
+        # to index idx.
+        bm = matrix(self.constraint_matrix).transpose().tolist()
+        return bm[idx]
+
+    def negative_constraint_column(self, idx):
+        # Return the column of the constraint matrix corresponding
+        # to index idx, multiplied by negative one.
+        bm = (-matrix(self.constraint_matrix).transpose()).tolist()
+        return bm[idx]
+
+    def basis_matrix(self):
+        # Return the columns of our constraint matrix corresponding
+        # to the basic variables.
+        idxs = self.nonbasic_indices
+        bm = matrix(self.constraint_matrix)
+        bm = delete(bm, idxs, axis=1)
+        return bm.tolist()
+
+    def reduced_cost(self, idx):
+        # Find the reduced cost of the variable whose column has index
+        # idx.
+        dx = array(self.delta_x(idx))
+        c = array(self.objective_coefficients)
+        return dot(dx, c)
+
+    def delta_x(self, idx):
+        # Return the array of deltas that would result from increasing
+        # the variable with index idx by one.
+        A = matrix(self.basis_matrix())
+        b = array(self.negative_constraint_column(idx))
+        x = linalg.solve(A,b).tolist()
+
+        # Now we get to add back the nonbasic columns. The 'idx' column
+        # should have a value of 1, and the other non-basics should
+        # have a value of 0.
+        xfull = [0.0] * len(self.solution_vector)
+        bidxs = self.basic_indices
+
+        for bidx in bidxs:
+            xfull[bidx] = x.pop(0)
+
+        xfull[idx] = 1.0
+        return xfull
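
For illustration, here is a minimal usage sketch of the two classes added
above. It is not part of the commit: it assumes the module is importable as
LinearProgramming, that the lpsolve55 bindings export the LE constraint-type
constant (numerically 1 in lp_solve 5.5), and that SimplexIteration can be
constructed with no arguments and configured through the properties shown in
the diff. The numbers are made up, and the snippet is Python 2, like the rest
of the module.

    from LinearProgramming import LinearProgram, SimplexIteration, MAXIMIZE
    from lpsolve55 import LE   # assumed constraint-type constant for "<="

    # Maximize 2x + 3y subject to x + y <= 4 and x + 3y <= 6, with
    # x, y >= 0 coming from lp_solve's default variable bounds.
    lp = LinearProgram()
    lp.type = MAXIMIZE
    lp.objective_coefficients = [2, 3]
    lp.constraint_matrix = [[1, 1],
                            [1, 3]]
    lp.rhs = [4, 6]
    lp.inequalities = [LE, LE]
    [obj, x, duals] = lp.solve()   # the lpsolve55 instance is created here
    print obj, x                   # expected: 9.0 and [3.0, 1.0]
    lp.delete()

    # The same problem in standard (equality) form with slack variables
    # x3 and x4, examined at the all-slack vertex (0, 0, 4, 6).
    it = SimplexIteration()
    it.constraint_matrix = [[1, 1, 1, 0],
                            [1, 3, 0, 1]]
    it.rhs = [4, 6]
    it.objective_coefficients = [-2, -3, 0, 0]   # minimize -2*x1 - 3*x2
    it.solution_vector = [0, 0, 4, 6]
    it.basic_variables = "x3,x4"
    print it.optimal               # expected: False, since x1 and x2 both
                                   # have negative reduced cost here

Note that the commit removes the old LEQ/EQ/GEQ constants, so callers now pass
whatever constraint-type codes the lpsolve55 bindings expect directly in the
inequalities vector.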