source: branches/devel-1/include/ClpSimplexDual.hpp @ 19

Last change on this file since 19 was 19, checked in by ladanyi, 17 years ago

reordering Clp

// Copyright (C) 2002, International Business Machines
// Corporation and others.  All Rights Reserved.

/*
   Authors

   John Forrest

 */
#ifndef ClpSimplexDual_H
#define ClpSimplexDual_H

#include "ClpSimplex.hpp"

/** This solves LPs using the dual simplex method

    It inherits from ClpSimplex.  It has no data of its own and
    is never created - only cast from a ClpSimplex object at algorithm time.

*/

class ClpSimplexDual : public ClpSimplex {

public:

  /**@name Description of algorithm */
  //@{
  /** Dual algorithm

      Method

     It tries to be a single-phase approach, with a weight of 1.0 being
     given to getting optimal and a weight of updatedDualBound_ being
     given to getting dual feasible.  In this version I have used the
     idea that this weight can be thought of as a fake bound.  If the
     distance between the lower and upper bounds on a variable is less
     than the feasibility weight, then we are always better off flipping
     to the other bound to make it dual feasible.  If the distance is greater,
     then we make up a fake bound updatedDualBound_ away from one bound.
     If we end up optimal or primal infeasible, we check to see if the
     bounds are okay.  If so we have finished; if not we increase updatedDualBound_
     and continue (after checking if unbounded).  I am undecided about
     free variables - there is coding for them but I am not sure about it.  At
     present I put them in the basis anyway.

     The code is designed to take advantage of sparsity, so arrays are
     seldom zeroed out from scratch or gone over in their entirety.
     The only exception is a full scan to find the outgoing variable for
     the Dantzig row choice.  For steepest edge we keep an updated list
     of infeasibilities (actually their squares).
     On easy problems we don't need a full scan - we just
     pick the first reasonable candidate.

     One problem is how to tackle degeneracy and accuracy.  At present
     I am using the modification of costs which I put in OSL and some
     of what I think is the dual analog of Gill et al.
     I am still not sure of the exact details.

     The flow of dual is three nested while loops, as follows:

     while (not finished) {

       while (not clean solution) {

          Factorize and/or clean up the solution by flipping variables so it is
          dual feasible.  If it looks finished, check the fake dual bounds.
          Repeat until the status is iterating (-1) or finished (0,1,2).

       }

       while (status==-1) {

         Iterate until there is no pivot in or out, or it is time to re-factorize.

         Flow is:

         Choose the pivot row (outgoing variable).  If there is none, then
         we are primal feasible, so it looks as if we are done, but we need to
         break and check bounds etc.

         Get the pivot row in the tableau.

         Choose the incoming column.  If we don't find one, then we look
         primal infeasible, so break and check bounds etc.  (Also the
         pivot tolerance is larger after any iterations, so that may be the
         reason.)

         If we do find an incoming column, we may have to adjust costs to
         keep going forwards (anti-degeneracy).  Check that the pivot will be stable,
         and if unstable throw away the iteration and break to re-factorize.
         If there is a minor error, re-factorize after the iteration.

         Update everything (this may involve flipping variables to stay
         dual feasible).

       }

     }

     TODOs (or maybe not)

     At present we never check that we are going forwards.  I overdid that in
     OSL so will try and make it a last resort.

     Needs a partial-scan pivot-out option.

     May need other anti-degeneracy measures, especially if we try and use
     loose tolerances as a way to solve in fewer iterations.

     I like the idea of dynamic scaling.  This gives an opportunity to decouple
     the different implications of scaling for accuracy, iteration count and
     feasibility tolerance.

  */

  int dual();
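  /* A minimal usage sketch (not part of the original header): the class has no
     data of its own, so the dual algorithm is normally reached by casting an
     existing ClpSimplex at algorithm time - ClpSimplex::dual() does essentially
     this internally.  The MPS file name below is hypothetical.

       ClpSimplex model;
       model.readMps("example.mps");
       int status = static_cast<ClpSimplexDual *>(&model)->dual();
  */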
  /** For strong branching.  On input lower and upper are the new bounds,
      while on output they are the changes in objective function value
      (>1.0e50 means infeasible).
      Return code is 0 if nothing interesting, -1 if infeasible both
      ways and +1 if infeasible one way (check the values to see which one(s)).
  */
  int strongBranching(int numberVariables,const int * variables,
                      double * newLower, double * newUpper,
                      bool stopOnFirstInfeasible=true,
                      bool alwaysFinish=false);
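  /* Illustrative sketch of a strongBranching call (an assumed usage pattern
     based on the comment above; the index and bounds are hypothetical, and
     "model" is an already-solved ClpSimplex).  On input the arrays hold the
     candidate bounds; on return the same entries hold the objective changes,
     with values above 1.0e50 meaning that direction is infeasible.

       int which[1] = {5};
       double newLower[1] = {1.0};   // bound tried for the "up" problem
       double newUpper[1] = {0.0};   // bound tried for the "down" problem
       ClpSimplexDual * dualModel = static_cast<ClpSimplexDual *>(&model);
       int code = dualModel->strongBranching(1, which, newLower, newUpper);
       // code: 0 nothing interesting, -1 infeasible both ways, +1 one way
  */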
  //@}

  /**@name Functions used in dual */
  //@{
  /** This has the flow between re-factorizations.
      Broken out for clarity; it will also be used by strong branching.

      Reasons to come out:
      -1 iterations etc
      -2 inaccuracy
      -3 slight inaccuracy (and done iterations)
      +0 looks optimal (might be unbounded - but we will investigate)
      +1 looks infeasible
      +3 max iterations
   */
  int whileIterating();
  /** The duals are updated by the given arrays.
      Returns the number of infeasibilities.
      Afterwards rowArray and columnArray will just contain those entries
      which have been flipped.
      Variables may be flipped between bounds to stay dual feasible.
      The output vector has the movement of the primal
      solution (a row-length array). */
  int updateDualsInDual(CoinIndexedVector * rowArray,
                  CoinIndexedVector * columnArray,
                  CoinIndexedVector * outputArray,
                  double theta,
                  double & objectiveChange);
  /** While updateDualsInDual sees what the effect of a flip would be,
      this does the actual flipping.
      If change >0.0 then a value in the array >0.0 means a flip from lower to upper.
  */
  void flipBounds(CoinIndexedVector * rowArray,
                  CoinIndexedVector * columnArray,
                  double change);
  /**
      The row array has the row part of the pivot row and
      the column array has the column part.
      This chooses the pivot column.
      Spare arrays are used to save pivots which will go infeasible.
      We check for basic variables, so the spare array will never overflow.
      If necessary, costs will be modified.
      For speed, we may need to go to a bucket approach when many
      variables are being flipped.
  */
  void dualColumn(CoinIndexedVector * rowArray,
                  CoinIndexedVector * columnArray,
                  CoinIndexedVector * spareArray,
                  CoinIndexedVector * spareArray2);
  /**
      Chooses the dual pivot row.
      Would be faster with a separate region to scan,
      and will have this (with the square of the infeasibility) when steepest edge is used.
      For easy problems we can just choose one of the first rows we look at.
  */
  void dualRow();
  /** Checks if any fake bounds are active - if so, returns the number and modifies
      updatedDualBound_ and everything.
      Free variables will be left as free.
      Returns the number of bounds changed if >=0.
      Returns -1 if not initializing and there was no effect.
      Fills in the change vector, which can be used to see if unbounded,
      and the cost of the change vector.
  */
  int changeBounds(bool initialize,CoinIndexedVector * outputArray,
                   double & changeCost);
  /** As changeBounds, but just changes the bounds for a single variable.
      Returns true if the bound changed. */
  bool changeBound( int iSequence);
  /// Restores the bound to its original value
  void originalBound(int iSequence);
  /** Checks if a tentative optimum actually means unbounded in the dual.
      Returns -3 if not, 2 if it is unbounded. */
  int checkUnbounded(CoinIndexedVector * ray,CoinIndexedVector * spare,
                     double changeCost);
  /**  Refactorizes if necessary.
       Checks if finished.  Updates status.
       lastCleaned refers to the iteration at which some objective/feasibility
       cleaning took place.

       type - 0 initial, so set up save arrays etc
            - 1 normal - if good, update save
            - 2 restoring from saved
  */
  void statusOfProblemInDual(int & lastCleaned, int type);
  /// Perturbs the problem (method depends on perturbation())
  void perturb();
  /** Fast iterations.  Misses out a lot of initialization.
      Normally stops on maximum iterations, the first re-factorization
      or a tentative optimum.  If it looks interesting then it continues as
      normal.  Returns 0 if finished properly, 1 otherwise.
  */
  int fastDual(bool alwaysFinish=false);
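  /* A sketch of how fastDual might be driven (an assumption based on the
     comment above, not taken from the original header): tighten a bound on an
     already-solved ClpSimplex, re-solve cheaply, then restore the bound.
     The column index is hypothetical.

       ClpSimplexDual * fast = static_cast<ClpSimplexDual *>(&model);
       double saveUpper = model.columnUpper()[3];
       model.columnUpper()[3] = 0.0;
       int returnCode = fast->fastDual();   // 0 = finished properly, 1 otherwise
       model.columnUpper()[3] = saveUpper;
  */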
  /** Checks the number of variables at fake bounds.  This is used by fastDual
      so it can exit gracefully before the end. */
  int numberAtFakeBound();
  //@}
};
#endif