source: branches/devel-1/include/ClpSimplexDual.hpp @ 15

Last change on this file since 15 was 14, checked in by forrest, 18 years ago

Breaking out whileIterating

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 6.8 KB
// Copyright (C) 2002, International Business Machines
// Corporation and others.  All Rights Reserved.

/*
   Authors

   John Forrest

 */
#ifndef ClpSimplexDual_H
#define ClpSimplexDual_H

#include "ClpSimplex.hpp"

/** This solves LPs using the dual simplex method

    It inherits from ClpSimplex.  It has no data of its own and
    is never created - only cast from a ClpSimplex object at algorithm time.

*/

class ClpSimplexDual : public ClpSimplex {

public:

  /**@name Description of algorithm */
  //@{
  /** Dual algorithm

      Method

     It tries to be a single phase approach, with a weight of 1.0 being
     given to getting optimal and a weight of updatedDualBound_ being
     given to getting dual feasible.  In this version I have used the
     idea that this weight can be thought of as a fake bound.  If the
     distance between the lower and upper bounds on a variable is less
     than the feasibility weight then we are always better off flipping
     to the other bound to make it dual feasible.  If the distance is
     greater then we make up a fake bound updatedDualBound_ away from
     one bound (see the sketch after dual() below).
     If we end up optimal or primal infeasible, we check whether the
     bounds are okay.  If so we have finished; if not we increase
     updatedDualBound_ and continue (after checking if unbounded).
     I am undecided about free variables - there is code but I am not
     sure about it.  At present I put them in the basis anyway.

     The code is designed to take advantage of sparsity, so arrays are
     seldom zeroed out from scratch or gone over in their entirety.
     The only exception is a full scan to find the outgoing variable for
     the Dantzig row choice.  For steepest edge we keep an updated list
     of infeasibilities (actually their squares).
     On easy problems we don't need a full scan - we just
     pick the first reasonable candidate.

     One problem is how to tackle degeneracy and accuracy.  At present
     I am using the modification of costs which I put in OSL and some
     of what I think is the dual analog of Gill et al.
     I am still not sure of the exact details.

     The flow of dual is three while loops as follows:

     while (not finished) {

       while (not clean solution) {

          Factorize and/or clean up the solution by flipping variables so
          it is dual feasible.  If it looks finished, check the fake dual
          bounds.  Repeat until status is iterating (-1) or finished (0,1,2).

       }

       while (status==-1) {

         Iterate until there is no pivot in or out, or it is time to
         re-factorize.

         Flow is:

         Choose the pivot row (outgoing variable).  If there is none then
         we are primal feasible, so it looks as if we are done, but we need
         to break and check bounds etc.

         Get the pivot row of the tableau.

         Choose the incoming column.  If we don't find one then we look
         primal infeasible, so break and check bounds etc.  (Also the
         pivot tolerance is larger after any iterations, so that may be
         the reason.)

         If we do find an incoming column, we may have to adjust costs to
         keep going forwards (anti-degeneracy).  Check that the pivot will
         be stable; if it is unstable, throw away the iteration and break
         to re-factorize.  If there is a minor error, re-factorize after
         the iteration.

         Update everything (this may involve flipping variables to stay
         dual feasible).

       }

     }

     TODO's (or maybe not)

     At present we never check that we are going forwards.  I overdid that
     in OSL, so I will try to make it a last resort here.

     Needs a partial scan pivot out option.

     May need other anti-degeneracy measures, especially if we try to use
     loose tolerances as a way to solve in fewer iterations.

     I like the idea of dynamic scaling.  This gives an opportunity to
     decouple the different implications of scaling for accuracy, iteration
     count and feasibility tolerance.

  */

  int dual();
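  /* A minimal sketch (not part of Clp) of the flip-or-fake-bound decision
     described above, for one nonbasic variable sitting at its lower bound
     whose reduced cost has the wrong sign.  All names here are hypothetical;
     the real code works on the internal solution arrays, not on scalars.

     void flipOrFakeBound(double lower, double upper, double dualBound,
                          double & value, double & fakeUpper)
     {
       if (upper - lower < dualBound) {
         // Bounds are close together: flipping to the other bound is cheap
         // and restores dual feasibility for this variable.
         value = upper;
       } else {
         // Bounds are far apart: treat lower + dualBound as a fake upper
         // bound and flip only that far.  If we later look optimal these
         // fake bounds must be checked (and dualBound increased if needed).
         fakeUpper = lower + dualBound;
         value = fakeUpper;
       }
     }
  */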
  //@}

  /**@name Functions used in dual */
  //@{
  /** This has the flow between re-factorizations.
      Broken out for clarity; it will also be used by strong branching.
   */
  void whileIterating();
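  /* A rough sketch (not the real implementation) of how dual() might drive
     the loops described above: statusOfProblemInDual() factorizes and cleans
     up until the status is iterating (-1) or finished (0,1,2), and
     whileIterating() pivots until it is time to re-factorize.  Using
     problemStatus_ as the status holder is an assumption here.

     // body of a hypothetical driver, run as a ClpSimplexDual member:
     int lastCleaned = 0;
     statusOfProblemInDual(lastCleaned, 0);     // type 0 - initial: set up save arrays
     while (problemStatus_ < 0) {               // -1 means still iterating
       whileIterating();                        // pivot until a re-factorization is needed
       statusOfProblemInDual(lastCleaned, 1);   // type 1 - normal: update saves if good
     }
  */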
  /** The duals are updated by the given arrays.
      Returns the number of infeasibilities.
      Afterwards rowArray and columnArray will just contain those
      variables which have been flipped.
      Variables may be flipped between bounds to stay dual feasible.
      The output vector has the movement of the primal
      solution (a row-length array) */
  int updateDualsInDual(OsiIndexedVector * rowArray,
                  OsiIndexedVector * columnArray,
                  OsiIndexedVector * outputArray,
                  double theta,
                  double & objectiveChange);
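  /* A simplified sketch (not Clp's code) of the arithmetic this performs for
     the columns hit by the pivot row: each reduced cost moves by
     -theta * alpha[j], and a variable whose reduced cost would change to the
     wrong sign is either flipped to its other bound (when the bounds are
     close enough) or counted as infeasible.  Plain arrays stand in for
     OsiIndexedVector; all names are illustrative.

     int updateDualsSketch(int n, const double * alpha, double theta,
                           double * dj, const bool * atLowerBound,
                           const double * lower, const double * upper,
                           double dualBound, double tolerance)
     {
       int numberInfeasible = 0;
       for (int j = 0; j < n; j++) {
         dj[j] -= theta * alpha[j];                // basic dual update
         bool wrongSign = atLowerBound[j] ? dj[j] < -tolerance
                                          : dj[j] > tolerance;
         if (wrongSign) {
           if (upper[j] - lower[j] < dualBound) {
             // Cheap to flip to the other bound and stay dual feasible;
             // the primal movement from the flip is what outputArray records.
           } else {
             numberInfeasible++;                   // leave it and count it
           }
         }
       }
       return numberInfeasible;
     }
  */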
  /** While updateDualsInDual sees what the effect of a flip would be,
      this does the actual flipping.
      If change >0.0 then value in array >0.0 => from lower to upper
  */
  void flipBounds(OsiIndexedVector * rowArray,
                  OsiIndexedVector * columnArray,
                  double change);
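  /* A small sketch (not Clp's code) of the sign convention documented above,
     over plain arrays rather than OsiIndexedVector: for change > 0.0 a
     positive element means the variable moves from its lower to its upper
     bound; the opposite combinations are assumed symmetric.

     void flipSketch(int number, const int * which, const double * element,
                     double change, double * solution,
                     const double * lower, const double * upper)
     {
       for (int i = 0; i < number; i++) {
         int j = which[i];
         if (change * element[i] > 0.0)
           solution[j] = upper[j];                 // lower -> upper
         else
           solution[j] = lower[j];                 // upper -> lower
       }
     }
  */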
  /**
      Row array has the row part of the pivot row,
      column array has the column part.
      This chooses the pivot column.
      Spare arrays are used to save pivots which will go infeasible.
      We check for basic variables, so the spare arrays will never overflow.
      If necessary this will modify costs.
      For speed, we may need to go to a bucket approach when many
      variables are being flipped.
  */
  void dualColumn(OsiIndexedVector * rowArray,
                  OsiIndexedVector * columnArray,
                  OsiIndexedVector * spareArray,
                  OsiIndexedVector * spareArray2);
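  /* A textbook-style sketch of a dual ratio test (not Clp's version, which
     also uses the spare arrays to flip bounds and may modify costs): assuming
     every nonbasic variable is at its lower bound with dj >= 0, the entering
     column is the eligible one with the smallest ratio dj/alpha, and that
     ratio becomes the dual step theta.  Names are illustrative only.

     int dualColumnSketch(int n, const double * dj, const double * alpha,
                          double pivotTolerance, double & theta)
     {
       int chosen = -1;
       theta = 1.0e50;                             // effectively infinite
       for (int j = 0; j < n; j++) {
         if (alpha[j] > pivotTolerance) {          // eligible direction only
           double ratio = dj[j] / alpha[j];
           if (ratio < theta) {
             theta = ratio;
             chosen = j;                           // smallest ratio so far
           }
         }
       }
       return chosen;       // -1: no column found, so we look primal infeasible
     }
  */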
  /**
      Chooses the dual pivot row.
      Would be faster with a separate region to scan,
      and will have this (with the square of the infeasibility) when
      steepest edge is used.
      For easy problems we can just choose one of the first rows we look at.
  */
  void dualRow();
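  /* A sketch of the Dantzig-style full scan mentioned in the comment for
     dual(): the outgoing variable is the basic variable with the largest
     primal infeasibility.  (Steepest edge would instead use an updated list
     of squared infeasibilities.)  Names are illustrative only.

     int dualRowSketch(int numberRows, const double * solution,
                       const double * lower, const double * upper,
                       double tolerance)
     {
       int chosenRow = -1;
       double largest = tolerance;
       for (int iRow = 0; iRow < numberRows; iRow++) {
         double infeasibility = 0.0;
         if (solution[iRow] < lower[iRow])
           infeasibility = lower[iRow] - solution[iRow];
         else if (solution[iRow] > upper[iRow])
           infeasibility = solution[iRow] - upper[iRow];
         if (infeasibility > largest) {
           largest = infeasibility;
           chosenRow = iRow;                       // most infeasible so far
         }
       }
       return chosenRow;                           // -1: primal feasible
     }
  */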
  /** Checks if any fake bounds are active - if so returns the number and
      modifies updatedDualBound_ and everything.
      Free variables will be left as free.
      Returns the number of bounds changed if >=0.
      Returns -1 if not initialized and there was no effect.
      Fills in changeVector, which can be used to see if unbounded,
      and the cost of the change vector.
  */
  int changeBounds(bool initialize,OsiIndexedVector * outputArray,
                   double & changeCost);
  /** As changeBounds but just changes the new bounds for a single variable.
      Returns true if changed */
  bool changeBound( int iSequence);
  /// Restores bound to original bound
  void originalBound(int iSequence);
  /** Checks if tentative optimal actually means unbounded in dual.
      Returns -3 if not, 2 if it is unbounded */
  int checkUnbounded(OsiIndexedVector * ray,OsiIndexedVector * spare,
                     double changeCost);
  /**  Refactorizes if necessary.
       Checks if finished.  Updates status.
       lastCleaned refers to the iteration at which some objective/feasibility
       cleaning took place.

       type - 0 initial, so set up save arrays etc
            - 1 normal - if good, update save
            - 2 restoring from saved
  */
  void statusOfProblemInDual(int & lastCleaned, int type);
  /// Perturbs the problem (method depends on perturbation())
  void perturb();
  //@}
};
#endif