Changeset 2181


Timestamp:
Jan 27, 2010 5:34:06 PM
Author:
wehart
Message:

Segregating the set-cover FDT heuristic (which doesn't work right now).

Location:
coopr.fdt/trunk/coopr/fdt
Files:
1 added
1 edited

Legend:

    unchanged lines are prefixed with a space, added lines with "+", and removed lines with "-".
  • coopr.fdt/trunk/coopr/fdt/FDT.py

(diff: r2124 → r2181)
 
-#from coopr.opt import SolverRegistration, IOptSolver, SolverFactory, SolverManagerFactory, SolverStatus, TerminationCondition
 from coopr.opt import *
 import copy
 
 import bidict
 from pyutilib.plugin.core import implements
-#from coopr.pyomo import sequence, Set, Param, Var, Constraint, Objective, Model, RangeSet, maximize, generate_canonical_repn, as_expr, apply_transformation, xsequence
 from coopr.pyomo import *
 import math
     
         Solve the specified problem and return a SolverResults object
         """
+        # TODO: handling n
+        # TODO: complement objective costs for binary variables
+        # TODO: ensure that objective is minimization
         instance = args[0]
         if len(args) == 2:
     
             omega[i] = rootlp[i-1]
         n = len(omega)
+        # TODO: distinguish between continuous vars in obj and binary vars
         keys = list(self.branching_lp.binary_vars)
         keys.sort()
     
             for node in pool:
                 node.fixed = i
+                # TODO: also append to pool if the ith value is 0 or 1
                 if node.integer:
                     tmppool.append(node)
     
                 branchinglp = self.generate_branching_lp(node, varmap, keys, reduced_instance)
                 results = self.solver_mngr.solve(branchinglp, opt=self.opt, keepFiles=True, tee=self.options.tee, timelimit=self.options.timelimit, options=" ".join(self.options.lpsolver_options))
-                results.write()
+                if options.debug:
+                    results.write()
                 #
                 # TODO: process results better here...
     
         print "FINAL POOL"
         print pool
+        # TODO: process final pool to eliminate things that aren't feasible
+        #   for the constraint matrix
+        # iterate for all 1's in the solution: try to drive the 1's to 0
+        #
         for item in pool:
             #BUG: reduced_instance.load(item.values)
     
         self.branching_lp.y_fixed.clear()
         self.branching_lp.y_fixed.add(0, self.branching_lp.y[i,0] == 0)
+        # TODO: address dominant feasibility here
+        #self.branching_lp.y_fixed.add(1, self.branching_lp.y[i,1] <= self.branching_lp.l[1])
         self.branching_lp.y_fixed.add(1, self.branching_lp.y[i,1] == self.branching_lp.l[1])
         self.branching_lp.y_fixed.construct()
     
     def process_results(self, node, results, tmppool):
         """ Process a branching LP results object and possibly update the pool """
+        # TODO: round parent values based on branching decisions (->1)
         soln = results.solution()
         did_work=False
     
 
 
-#
-# This is a specialization of the FDT heuristic for the
-# set-cover problem.
-#
-class SetCoverFDT(FDT):
-
-    def __init__(self, options):
-        FDT.__init__(self, options)
-
-    def run(self, instance):
-        #
-        # Compute the LP relaxation of the model instance
-        #
-        root_lp_value = self.process_root_lp(instance)
-        instance.pprint()
-        #
-        # Generate a reduced instance, where variables with value less thatn
-        # a proscribed tolerance are fixed to zero.
-        #
-        reduced_instance, varmap = self.eliminate_zero_variables(instance, self.options.tol)
-        #reduced_instance.display()
-        omega = reduced_instance.solution()
-        print "OMEGA",omega
-        print "VARMAP",varmap
-        # initial pool
-        pool = [Container(values=omega, nfixed=0, integer=self.is_integer(omega, self.options.tol))]
-        n = len(omega)
-        # iterate until all locations are fixed
-        for i in range(n):
-            tmppool=[]
-            did_work=False
-            for node in pool:
-                if node.integer:
-                    node.nfixed += 1
-                    tmppool.append(node)
-                    continue
-
-                branchinglp = self.generate_branching_lp(node.nfixed+1, n, node.values, varmap, reduced_instance)
-                results = self.solver_mngr.solve(branchinglp, opt=self.opt, tee=self.options.tee, timelimit=self.options.timelimit, options=" ".join(self.options.lpsolver_options))
-                results.write()
-                if self.bad_results(results):
-                    print "Terminating due to error in results"
-                    return None
-                soln = results.solution()
-                print soln.variable.keys()
-                if soln.variable.l[0].value > self.options.tol:
-                    values = []
-                    for i in xsequence(n):
-                        values.append( soln.variable.x[i,0].value / soln.variable.l[0].value )
-                    did_work=True
-                    tmppool += [ Container(nfixed=node.nfixed+1, values=values, integer=self.is_integer(values, self.options.tol)) ]
-                if soln.variable.l[1].value > self.options.tol:
-                    values = []
-                    for i in xsequence(n):
-                        values.append( soln.variable.x[i,1].value / soln.variable.l[1].value )
-                    did_work=True
-                    tmppool += [ Container(nfixed=node.nfixed+1, values=values, integer=self.is_integer(values, self.options.tol)) ]
-            if did_work:
-                if (self.options.always_prune or len(pool) > n):
-                    pool = self.prune(tmppool, n, omega)
-                else:
-                    pool = tmppool
-            print pool
-        print pool
-        for item in pool:
-            #BUG: reduced_instance.load(item.values)
-            for i in xsequence(len(item.values)):
-                reduced_instance.variable(i).value = item.values[i]
-            cost = reduced_instance.cost()
-            print "HERE", item.values, cost, "ratio=%s" % (root_lp_value / cost)
-        # TODO: final stuff
-
-    def bad_results(self, results):
-        soln = results.solution()
-        if soln.variable.l[0].value + soln.variable.l[1].value < 0.75:
-            print "Lambda values do not sum to a value greater than 0.75!"
-            return True
-        return False
-
-    def prune(self, pool, n, omega):
-        print "Pool", pool
-        #
-        # Create a packing LP model
-        #
-        model = Model()
-        #
-        model.V = RangeSet(1,n)
-        #
-        def xs_rule(i, model):
-            return omega[i-1]
-        model.xs = Param(model.V, initialize=xs_rule)
-        #
-        model.Good = RangeSet(1,len(pool))
-        #
-        def Sols_rule(i, u, model):
-            return pool[i-1].values[u-1]
-        model.Sols = Param(model.Good, model.V, initialize=Sols_rule)
-        #
-        model.lam = Var(model.Good, bounds=(0.0,1.0))
-        #
-        def Prunedom_rule(model):
-            return summation(model.lam)
-        model.Prunedom = Objective(rule=Prunedom_rule, sense=maximize)
-        #
-        def ConvComb_rule(u, model):
-            return sum([ model.lam[i]*model.Sols[i,u] for i in model.Good]) <= model.xs[u]
-        model.ConvComb = Constraint(model.V, rule=ConvComb_rule)
-        #
-        # Construct and solve this model
-        #
-        instance = model.create()
-        results = self.solver_mngr.solve(instance, opt=self.opt, tee=self.options.tee, timelimit=self.options.timelimit, options=" ".join(self.options.solver_options))
-        #
-        # Create new pool using the nonzero lambdas
-        #
-        newpool = []
-        soln = results.solution()
-        print "Root LP", omega
-        print "Pool", pool
-        for i in soln.variable.lam:
-            print "Packing Info:", i, soln.variable.lam[i].value, pool[i-1].values
-            if soln.variable.lam[i].value > 0.0:
-                newpool.append( pool[i-1] )
-        return newpool
-
-    def generate_branching_lp(self, fix_var, n, xb, varmap, root):
-        #
-        model = Model()
-        #
-        model.fix_var = Param(initialize=fix_var)
-        #
-        def SS_rule(model):
-            ans = set()
-            for (i,j) in root.S:
-                try:
-                    ans.add( (varmap[:(i-1)]+1, j) )
-                except:
-                    pass
-            return ans
-        model.SS = Set(dimen=2, rule=SS_rule)
-        #
-        model.L = Set(initialize=set([0,1]))
-        #
-        model.V = RangeSet(1,n)
-        #
-        model.Fixed = RangeSet(1,fix_var-1)
-        #
-        def xb_rule(i, model):
-            return xb[i-1]
-        model.xb = Param(model.V, rule=xb_rule)
-        #
-        def M_rule(model):
-            ans = set()
-            for (i,j) in model.SS:
-                ans.add(j)
-            return ans
-        model.M = Set(rule=M_rule)
-        #
-        model.x = Var(model.V, model.L, bounds=(0.0,1.0))
-        #
-        model.l = Var(model.L, bounds=(0.0,1.0))
-        #
-        def Packdom_rule(model):
-            return model.l[0]+model.l[1]
-        model.Packdom = Objective(rule=Packdom_rule, sense=maximize)
-        #
-        def setcover_rule(i, mx, instance):
-            expr = 0
-            for (j,ii) in instance.SS:
-                if ii == i:
-                    expr += instance.x[j,mx]
-            return instance.l[mx] <=  expr
-        model.setcover = Constraint(model.M, model.L, rule=setcover_rule)
-        #
-        def upbound_rule(i, mx, instance):
-            return instance.x[i,mx] <= instance.l[mx]
-        model.upbound = Constraint(model.V, model.L, rule=upbound_rule)
-        #
-        def packinxs_rule(i, instance):
-            return instance.x[i,0] + instance.x[i,1] <= instance.xb[i]
-        model.packinxs = Constraint(model.V, rule=packinxs_rule)
-        #
-        def FixLoworHigh_rule(u, j, instance):
-            return instance.x[u,j] == instance.l[j] * instance.xb[u]
-
-        model.FixLoworHigh = Constraint(model.Fixed, model.L, rule=FixLoworHigh_rule)
-        #
-        def FixVarLoworHigh_rule(j, instance):
-            return instance.x[value(instance.fix_var), j] == instance.l[j]*j
-        model.FixVarLoworHigh = Constraint(model.L, rule=FixVarLoworHigh_rule)
-        #
-        # Create an instance of this model
-        #
-        instance = model.create()
-        instance.pprint()
-        return instance
-
-
 # Register the solver with the name 'fdt'
 SolverRegistration('fdt', FDT)
 
-
-if __name__ == "__main__":
-    #
-    # A hack to generate options
-    #
-    options=Container()
-    print 'YY'
-    options._name = 'options'
-    print 'YY'
-    options.keep_files=False
-    options.log=False
-    options.debug=True
-    options.timelimit=None
-    options.tee=True
-    options.solver='cplex'
-    options.solver_options=[]
-    options.smanager_type='serial'
-
-    import sc
-    instance = sc.model.create()
-    fdt = SetCoverFDT(options)
-    fdt.run(instance)
-