source: trunk/Bonmin/src/Interfaces/BonTMINLP.hpp @ 1438

// (C) Copyright International Business Machines Corporation and
// Carnegie Mellon University 2004, 2007
//
// All Rights Reserved.
// This code is published under the Common Public License.
//
// Authors :
// Pierre Bonami, Carnegie Mellon University,
// Carl D. Laird, Carnegie Mellon University,
// Andreas Waechter, International Business Machines Corporation
//
// Date : 12/01/2004

#ifndef __TMINLP_HPP__
#define __TMINLP_HPP__

#include "IpUtils.hpp"
#include "IpReferenced.hpp"
#include "IpException.hpp"
#include "IpAlgTypes.hpp"
#include "CoinPackedMatrix.hpp"
#include "OsiCuts.hpp"
#include "IpTNLP.hpp"
#include "CoinError.hpp"

#include "CoinHelperFunctions.hpp"
using namespace Ipopt;
namespace Bonmin
{
  DECLARE_STD_EXCEPTION(TMINLP_INVALID);
  DECLARE_STD_EXCEPTION(TMINLP_INVALID_VARIABLE_BOUNDS);

  /** Base class for all MINLPs that use a standard triplet matrix form
   *  and dense vectors.
   *  The class TMINLP2TNLP allows the caller to produce a viable TNLP
   *  from the MINLP (by relaxing binary and/or integer variables, or by
   *  fixing them), which can then be solved by Ipopt.
   *
   *  This interface presents the problem in the form:
   *  \f[
   *  \begin{array}{rl}
   *     \min & f(x) \\
   *     \mbox{s.t.} & g^L \leq g(x) \leq g^U \\
   *                 & x^L \leq x \leq x^U
   *  \end{array}
   *  \f]
   *  where each \f$ x_i \f$ is a continuous, binary, or integer variable.
   *  If \f$ x_i \f$ is binary, the bounds \f$ [x^L_i, x^U_i] \f$ are assumed to be \f$ [0,1] \f$.
   *  In order to specify an equality constraint, set \f$ g^L_i = g^U_i = rhs \f$.
   *  The value that indicates "infinity" for the bounds (i.e. the variable or
   *  constraint has no lower bound (-infinity) or upper bound (+infinity)) is
   *  set through the options nlp_lower_bound_inf and nlp_upper_bound_inf.
   *  To indicate that a variable has no lower or upper bound, set the bound
   *  at or below nlp_lower_bound_inf or at or above nlp_upper_bound_inf,
   *  respectively.
   */
  class TMINLP : public Ipopt::ReferencedObject
  {
  public:
    friend class TMINLP2TNLP;
    /** Return status codes of the algorithm.*/
    enum SolverReturn{
      SUCCESS,
      INFEASIBLE,
      CONTINUOUS_UNBOUNDED,
      LIMIT_EXCEEDED,
      MINLP_ERROR};
    /** Class to store the SOS constraints of the model.*/
    struct SosInfo
    {
      /** Number of SOS constraints.*/
      int num;
      /** Type of each SOS. At present only type-1 SOS constraints are supported by Cbc.*/
      char * types;
      /** Priorities of the SOS constraints.*/
      int * priorities;

      /** \name Sparse storage of the elements of the SOS constraints.*/
      /** @{ */
      /** Total number of nonzeros in the SOS constraints.*/
      int numNz;
      /** For 0 <= i < num, starts[i] gives the position in the indices and
          weights arrays at which the description of constraint i begins.*/
      int * starts;
      /** Indices of the elements belonging to the SOS constraints.*/
      int * indices;
      /** Weights of the elements of the SOS constraints.*/
      double * weights;
      /** @} */
      /** Default constructor. */
      SosInfo();
      /** Copy constructor.*/
      SosInfo(const SosInfo & source);

      /** Destructor.*/
      ~SosInfo()
      {
        gutsOfDestructor();
      }

      /** Free the arrays and reset the information. */
      void gutsOfDestructor();

    };
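    /* A minimal sketch (hypothetical, not part of this interface): filling
     * the sparse SOS storage for two SOS1 constraints, {x0, x1} and
     * {x2, x3, x4}.  It is assumed here that starts has num + 1 entries with
     * starts[num] == numNz, following the usual column-start convention.
     *
     *   SosInfo sos;
     *   sos.num   = 2;
     *   sos.numNz = 5;
     *   sos.types      = new char[2];
     *   sos.priorities = new int[2];
     *   sos.starts     = new int[3];        // num + 1 entries
     *   sos.indices    = new int[5];
     *   sos.weights    = new double[5];
     *   sos.types[0] = sos.types[1] = 1;    // both sets are SOS of type 1
     *   sos.priorities[0] = 1; sos.priorities[1] = 2;
     *   const int    st[]  = {0, 2, 5};
     *   const int    idx[] = {0, 1,  2, 3, 4};
     *   const double w[]   = {1., 2.,  1., 2., 3.};
     *   for (int k = 0; k < 3; k++) sos.starts[k] = st[k];
     *   for (int k = 0; k < 5; k++) { sos.indices[k] = idx[k]; sos.weights[k] = w[k]; }
     */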

    /** Stores branching priority information. */
    struct BranchingInfo
    {
      /** Number of variables.*/
      int size;
      /** User-set priorities on the variables. */
      int * priorities;
      /** User-set preferred branching directions. */
      int * branchingDirections;
      /** User-set up pseudo-costs.*/
      double * upPsCosts;
      /** User-set down pseudo-costs.*/
      double * downPsCosts;
      BranchingInfo():
      size(0),
      priorities(NULL),
      branchingDirections(NULL),
      upPsCosts(NULL),
      downPsCosts(NULL)
      {}
      /** Copy constructor.*/
      BranchingInfo(const BranchingInfo &other):
      size(other.size),
      priorities(CoinCopyOfArray(other.priorities, other.size)),
      branchingDirections(CoinCopyOfArray(other.branchingDirections, other.size)),
      upPsCosts(CoinCopyOfArray(other.upPsCosts, other.size)),
      downPsCosts(CoinCopyOfArray(other.downPsCosts, other.size))
      {}
      void gutsOfDestructor()
      {
        if (priorities != NULL) delete [] priorities;
        priorities = NULL;
        if (branchingDirections != NULL) delete [] branchingDirections;
        branchingDirections = NULL;
        if (upPsCosts != NULL) delete [] upPsCosts;
        upPsCosts = NULL;
        if (downPsCosts != NULL) delete [] downPsCosts;
        downPsCosts = NULL;
      }
      ~BranchingInfo()
      {
        gutsOfDestructor();
      }
    };
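    /* A minimal sketch (hypothetical, not part of this interface): setting
     * branching priorities and preferred directions for a model with three
     * variables.  The interpretation of priority values and direction codes
     * is left to the branch-and-bound layer; the values below are
     * illustrative only.
     *
     *   BranchingInfo b;
     *   b.size = 3;                           // one entry per variable
     *   b.priorities          = new int[3];
     *   b.branchingDirections = new int[3];
     *   for (int i = 0; i < 3; i++) {
     *     b.priorities[i]          = i + 1;   // branch on x0 first (assumed convention)
     *     b.branchingDirections[i] = 1;       // prefer one direction, e.g. the up branch
     *   }
     *   // upPsCosts and downPsCosts may stay NULL when no pseudo-costs are given.
     */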

    /** Class to store perturbation radii for the variables in the model. */
    class PerturbInfo
    {
    public:
      /** Default constructor. */
      PerturbInfo() :
        perturb_radius_(NULL)
      {}

      /** Destructor.*/
      ~PerturbInfo()
      {
        delete [] perturb_radius_;
      }

      /** Method for setting the perturbation radii. */
      void SetPerturbationArray(Index numvars, const double* perturb_radius);

      /** Method for getting the array of perturbation radii in
       *  order to use the values. */
      const double* GetPerturbationArray() const {
        return perturb_radius_;
      }

    private:
      /** Copy constructor.*/
      PerturbInfo(const PerturbInfo & source);

      /** Perturbation radii for all variables.  A negative value
       *  means that the radius has not been given. If the pointer is
       *  NULL, then no variables have been assigned a perturbation
       *  radius. */
      double* perturb_radius_;
    };
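    /* A minimal sketch (hypothetical, not part of this interface): supplying
     * perturbation radii for three variables.  A negative entry means "no
     * radius given" for that variable, as documented above.
     *
     *   PerturbInfo info;
     *   const double radii[3] = {0.1, -1., 0.05};   // no radius for x1
     *   info.SetPerturbationArray(3, radii);
     *   const double* r = info.GetPerturbationArray();
     */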

    /** Types of the variables.*/
    enum VariableType
    {
      CONTINUOUS,
      BINARY,
      INTEGER
    };

    /**@name Constructors/Destructors */
    //@{
    TMINLP();

    /** Default destructor */
    virtual ~TMINLP();
    //@}

    /**@name Methods to gather information about the MINLP */
    //@{
    /** Overload this method to return the number of variables
     *  and constraints, and the number of non-zeros in the Jacobian and
     *  the Hessian. */
    virtual bool get_nlp_info(Index& n, Index& m, Index& nnz_jac_g,
        Index& nnz_h_lag, TNLP::IndexStyleEnum& index_style)=0;
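    /* A minimal sketch (hypothetical, not part of this interface): a
     * subclass "MyTMINLP" reporting the sizes of the toy problem
     *   min x1  s.t.  x0^2 + x1 >= 1,  x0 integer in [0,5],  x1 in [0,10].
     * The Jacobian has two structural nonzeros and the Hessian of the
     * Lagrangian has one (lower triangle only).
     *
     *   bool MyTMINLP::get_nlp_info(Index& n, Index& m, Index& nnz_jac_g,
     *       Index& nnz_h_lag, TNLP::IndexStyleEnum& index_style)
     *   {
     *     n = 2;                        // two variables
     *     m = 1;                        // one constraint
     *     nnz_jac_g = 2;                // dg/dx0 and dg/dx1
     *     nnz_h_lag = 1;                // only the (0,0) entry is nonzero
     *     index_style = TNLP::C_STYLE;  // 0-based indices
     *     return true;
     *   }
     */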

    /** Overload this method to return scaling parameters. This is
     *  only called if the options are set to retrieve user scaling.
     *  There, use_x_scaling (or use_g_scaling) should be set to true
     *  only if the variables (or constraints) are to be scaled.  This
     *  method should return true only if the scaling parameters could
     *  be provided.
     */
    virtual bool get_scaling_parameters(Number& obj_scaling,
                                        bool& use_x_scaling, Index n,
                                        Number* x_scaling,
                                        bool& use_g_scaling, Index m,
                                        Number* g_scaling)
    {
      return false;
    }


    /** Overload this method to provide the variable types. The var_types
     *  array will be allocated with length n. */
    virtual bool get_variables_types(Index n, VariableType* var_types)=0;
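    /* A minimal sketch (hypothetical) for the toy problem above: x0 is the
     * integer variable and x1 the continuous one.
     *
     *   bool MyTMINLP::get_variables_types(Index n, VariableType* var_types)
     *   {
     *     var_types[0] = INTEGER;
     *     var_types[1] = CONTINUOUS;
     *     return true;
     *   }
     */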

    /** Overload this method to provide the variable linearities. The
     *  array should be allocated with length at least n.*/
    virtual bool get_variables_linearity(Index n,
                                           Ipopt::TNLP::LinearityType* var_types) = 0;

    /** Overload this method to provide the constraint linearities. The
     *  array should be allocated with length at least m.*/
    virtual bool get_constraints_linearity(Index m,
                                           Ipopt::TNLP::LinearityType* const_types) = 0;

    /** Overload this method to return the information about the bounds
     *  on the variables and constraints. The value that indicates
     *  that a bound does not exist is specified in the parameters
     *  nlp_lower_bound_inf and nlp_upper_bound_inf.  By default,
     *  nlp_lower_bound_inf is -1e19 and nlp_upper_bound_inf is
     *  1e19.
     *  An exception will be thrown if x_l and x_u are not 0 and 1 for binary variables.
     */
    virtual bool get_bounds_info(Index n, Number* x_l, Number* x_u,
        Index m, Number* g_l, Number* g_u)=0;
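    /* A minimal sketch (hypothetical) for the toy problem above.  The
     * constraint x0^2 + x1 >= 1 has no upper bound, so g_u is set to the
     * default "infinity" value 1e19.
     *
     *   bool MyTMINLP::get_bounds_info(Index n, Number* x_l, Number* x_u,
     *       Index m, Number* g_l, Number* g_u)
     *   {
     *     x_l[0] = 0.;  x_u[0] = 5.;    // x0, integer
     *     x_l[1] = 0.;  x_u[1] = 10.;   // x1, continuous
     *     g_l[0] = 1.;  g_u[0] = 1e19;  // x0^2 + x1 >= 1
     *     return true;
     *   }
     */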

    /** Overload this method to return the starting point. The bools
     *  init_x, init_z and init_lambda are both inputs and outputs. As inputs,
     *  they indicate whether or not the algorithm wants you to
     *  initialize x, z and lambda respectively. If, for some reason, the
     *  algorithm wants you to initialize these and you cannot, set
     *  the respective bool to false.
     */
    virtual bool get_starting_point(Index n, bool init_x, Number* x,
                                    bool init_z, Number* z_L, Number* z_U,
        Index m, bool init_lambda,
        Number* lambda)=0;

    /** Overload this method to return the value of the objective function. */
    virtual bool eval_f(Index n, const Number* x, bool new_x,
        Number& obj_value)=0;

    /** Overload this method to return the gradient of
     *  the objective w.r.t. x. */
    virtual bool eval_grad_f(Index n, const Number* x, bool new_x,
        Number* grad_f)=0;

    /** Overload this method to return the vector of constraint values. */
    virtual bool eval_g(Index n, const Number* x, bool new_x,
        Index m, Number* g)=0;

    /** Overload this method to return the Jacobian of the
     *  constraints. The vectors iRow and jCol only need to be set
     *  once. The first call is used to set the structure only (iRow
     *  and jCol will be non-NULL, and values will be NULL). For
     *  subsequent calls, iRow and jCol will be NULL. */
    virtual bool eval_jac_g(Index n, const Number* x, bool new_x,
        Index m, Index nele_jac, Index* iRow,
        Index *jCol, Number* values)=0;
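    /* A minimal sketch (hypothetical) for the toy constraint g0(x) = x0^2 + x1:
     * the first call fills the sparsity pattern, later calls fill the values.
     *
     *   bool MyTMINLP::eval_jac_g(Index n, const Number* x, bool new_x,
     *       Index m, Index nele_jac, Index* iRow, Index* jCol, Number* values)
     *   {
     *     if (values == NULL) {          // structure-only call
     *       iRow[0] = 0; jCol[0] = 0;    // dg0/dx0
     *       iRow[1] = 0; jCol[1] = 1;    // dg0/dx1
     *     }
     *     else {                         // value call
     *       values[0] = 2. * x[0];
     *       values[1] = 1.;
     *     }
     *     return true;
     *   }
     */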

    /** Overload this method to return the Hessian of the
     *  Lagrangian. The vectors iRow and jCol only need to be set once
     *  (during the first call). The first call is used to set the
     *  structure only (iRow and jCol will be non-NULL, and values
     *  will be NULL). For subsequent calls, iRow and jCol will be
     *  NULL. This matrix is symmetric - specify the lower triangular
     *  part only. */
    virtual bool eval_h(Index n, const Number* x, bool new_x,
        Number obj_factor, Index m, const Number* lambda,
        bool new_lambda, Index nele_hess,
        Index* iRow, Index* jCol, Number* values)=0;
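    /* A minimal sketch (hypothetical) for the toy problem: the objective x1
     * has a zero Hessian, so only the constraint term lambda[0] * (x0^2 + x1)
     * contributes, with a single lower-triangular nonzero at (0,0).
     *
     *   bool MyTMINLP::eval_h(Index n, const Number* x, bool new_x,
     *       Number obj_factor, Index m, const Number* lambda,
     *       bool new_lambda, Index nele_hess,
     *       Index* iRow, Index* jCol, Number* values)
     *   {
     *     if (values == NULL) {          // structure-only call
     *       iRow[0] = 0; jCol[0] = 0;
     *     }
     *     else {                         // d^2/dx0^2 of lambda[0] * g0(x)
     *       values[0] = 2. * lambda[0];
     *     }
     *     return true;
     *   }
     */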
    /** Compute the value of a single constraint. The constraint
     *  number is i (counting from 0). */
    virtual bool eval_gi(Index n, const Number* x, bool new_x,
                         Index i, Number& gi)
    {
      std::cerr << "Method eval_gi not overloaded from TMINLP\n";
      throw -1;
    }
    /** Compute the structure or values of the gradient for one
     *  constraint. The constraint number is i (counting from 0).
     *  Other things are like with eval_jac_g. */
    virtual bool eval_grad_gi(Index n, const Number* x, bool new_x,
                              Index i, Index& nele_grad_gi, Index* jCol,
                              Number* values)
    {
      std::cerr << "Method eval_grad_gi not overloaded from TMINLP\n";
      throw -1;
    }
    //@}

    /** @name Solution Methods */
    //@{
    /** This method is called when the algorithm terminates so that the TMINLP
     *  can store/write the solution. */
    virtual void finalize_solution(TMINLP::SolverReturn status,
                                   Index n, const Number* x, Number obj_value) =0;
    //@}

    /** Get the branching priority information. */
    virtual const BranchingInfo * branchingInfo() const = 0;

    /** Get the SOS constraint information. */
    virtual const SosInfo * sosConstraints() const = 0;

    /** Get the perturbation information (NULL by default). */
    virtual const PerturbInfo* perturbInfo() const
    {
      return NULL;
    }

    /** Say whether the problem has a specific function to compute upper bounds. */
    virtual bool hasUpperBoundingObjective(){
      return false;}

    /** Overload this method to return the value of an alternative objective function
     *  used for upper bounding (to use it, hasUpperBoundingObjective() should return true).*/
    virtual bool eval_upper_bound_f(Index n, const Number* x,
                                    Number& obj_value){ return false; }

    /** Used to mark the constraints of the problem.*/
    enum Convexity {
      Convex,        /**< Constraint is convex.*/
      NonConvex,     /**< Constraint is non-convex.*/
      SimpleConcave  /**< Constraint is concave of the simple form y >= F(x).*/
    };

    /** Structure for marked non-convex constraints, with the possibility of
        storing the index of a constraint relaxing the non-convex constraint.*/
    struct MarkedNonConvex
    {
      /** Default constructor gives "safe" values.*/
      MarkedNonConvex():
        cIdx(-1), cRelaxIdx(-1)
      {}
      /** Index of the nonconvex constraint.*/
      int cIdx;
      /** Index of the constraint relaxing the nonconvex constraint.*/
      int cRelaxIdx;
    };
    /** Structure which describes a constraint of the form
        \f[ y \geq F(x) \f]
        with \f$ F(x) \f$ a concave function.*/
    struct SimpleConcaveConstraint
    {
      /** Default constructor gives "safe" values.*/
      SimpleConcaveConstraint():
        xIdx(-1), yIdx(-1), cIdx(-1)
      {}
      /** Index of the variable x.*/
      int xIdx;
      /** Index of the variable y.*/
      int yIdx;
      /** Index of the constraint.*/
      int cIdx;
    };
    /** Get access to the constraint convexities.*/
    virtual bool get_constraint_convexities(int m, TMINLP::Convexity * constraints_convexities) const
    {
      CoinFillN(constraints_convexities, m, TMINLP::Convex);
      return true;
    }
    /** Get dimension information on the nonconvex constraints.*/
    virtual bool get_number_nonconvex(int & number_non_conv, int & number_concave) const
    {
      number_non_conv = 0;
      number_concave = 0;
      return true;
    }
    /** Get the array describing the constraints marked nonconvex in the model.*/
    virtual bool get_constraint_convexities(int number_non_conv, MarkedNonConvex * non_convs) const
    {
      assert(number_non_conv == 0);
      return true;
    }
    /** Fill the array containing the indices of the simple concave constraints.*/
    virtual bool get_simple_concave_constraints(int number_concave, SimpleConcaveConstraint * simple_concave) const
    {
      assert(number_concave == 0);
      return true;
    }
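    /* A minimal sketch (hypothetical, not part of this interface): a model
     * with six constraints in which constraint 2 is nonconvex and constraint
     * 5 is an added constraint relaxing it.  The indices are illustrative.
     *
     *   bool MyTMINLP::get_number_nonconvex(int & number_non_conv, int & number_concave) const
     *   {
     *     number_non_conv = 1;
     *     number_concave = 0;
     *     return true;
     *   }
     *
     *   bool MyTMINLP::get_constraint_convexities(int m, TMINLP::Convexity * conv) const
     *   {
     *     CoinFillN(conv, m, TMINLP::Convex);
     *     conv[2] = TMINLP::NonConvex;
     *     return true;
     *   }
     *
     *   bool MyTMINLP::get_constraint_convexities(int number_non_conv, MarkedNonConvex * nc) const
     *   {
     *     nc[0].cIdx = 2;       // the nonconvex constraint
     *     nc[0].cRelaxIdx = 5;  // the constraint relaxing it
     *     return true;
     *   }
     */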

    /** Say if the problem has a linear objective (for OA). */
    virtual bool hasLinearObjective(){return false;}

  protected:
    /**@name Copy constructor and assignment operator */
    //@{
    /** Copy constructor.*/
    TMINLP(const TMINLP&);

    /** Overloaded assignment operator.*/
    void operator=(const TMINLP&);
    //@}

  private:
  };

} // namespace Bonmin

#endif