source: trunk/Cbc/src/CbcLinkedUtils.cpp

Last change on this file was 2465, checked in by unxusr, 4 months ago

script to format sources

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 22.7 KB
// Copyright (C) 2007, International Business Machines
// Corporation and others.  All Rights Reserved.
// This code is licensed under the terms of the Eclipse Public License (EPL).

/* $Id: CbcLinkedUtils.cpp 2465 2019-01-03 19:26:52Z forrest $ */

/*! \file CbcLinkedUtils.cpp
    \brief Hooks to Ampl (for CbcLinked)

    This code is a condensation of ClpAmplStuff.cpp, renamed to better
    reflect its current place in cbc.

  The code here had ties to NEW_STYLE_SOLVER code. During the 091209 Watson
  meeting, NEW_STYLE_SOLVER code was eliminated. The code here was condensed
  from ClpAmplStuff.cpp. The hook into CbcLinked is loadNonLinear. Once you
  bring that in, all the rest follows. Still, we're down about 400 lines of
  code. In the process, it appears that ClpAmplObjective.cpp was never needed
  here; the code was hooked into ClpAmplStuff.cpp.  --lh, 091209 --
*/

#include "ClpConfig.h"
#include "CbcConfig.h"
#ifdef COIN_HAS_ASL
#include "CoinPragma.hpp"
#include "CoinHelperFunctions.hpp"
#include "CoinIndexedVector.hpp"
#include "ClpFactorization.hpp"
#include "ClpSimplex.hpp"
#include "ClpAmplObjective.hpp"
#include "ClpConstraintAmpl.hpp"
#include "ClpMessage.hpp"
#include "CoinUtilsConfig.h"
#include "CoinWarmStartBasis.hpp"
#include "OsiSolverInterface.hpp"
#include "Cbc_ampl.h"
#include "CoinTime.hpp"
#include "CglStored.hpp"
#include "CoinModel.hpp"
#include "CbcLinked.hpp"

extern "C" {
//# include "getstub.h"
#include "asl_pfgh.h"
}

// stolen from Ipopt with changes
typedef struct {
  double obj_sign_;
  ASL_pfgh *asl_;
  double *non_const_x_;
  int *column_; // for jacobian
  int *rowStart_;
  double *gradient_;
  double *constraintValues_;
  int nz_h_full_; // number of nonzeros in hessian
  int nerror_;
  bool objval_called_with_current_x_;
  bool conval_called_with_current_x_;
  bool jacval_called_with_current_x_;
} CbcAmplInfo;
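
/* Descriptive note (added for clarity): the three *_called_with_current_x_
   flags implement lazy re-evaluation.  apply_new_x() below copies a new point
   into non_const_x_, reports it to ASL via xknowne() and clears all three
   flags; internal_objval(), internal_conval() and the jacval() wrapper then
   recompute on demand and set their flag on success.  A rough sketch of the
   intended call sequence (hypothetical locals, not part of the build):

     double objValue = 0.0;
     double *g = new double[numberConstraints]; // hypothetical size
     apply_new_x(info, true, n, x);             // new point: flags cleared, x copied
     internal_objval(info, objValue);           // calls objval(), sets its flag
     internal_conval(info, g);                  // calls conval(), sets its flag
*/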

//#############################################################################
// Constructors / Destructor / Assignment
//#############################################################################

//-------------------------------------------------------------------
// Default Constructor
//-------------------------------------------------------------------
ClpAmplObjective::ClpAmplObjective()
  : ClpObjective()
{
  type_ = 12;
  objective_ = NULL;
  amplObjective_ = NULL;
  gradient_ = NULL;
  offset_ = 0.0;
}

bool get_constraints_linearity(void *amplInfo, int n,
  int *const_types)
{
  CbcAmplInfo *info = (CbcAmplInfo *)amplInfo;
  ASL_pfgh *asl = info->asl_;
  // check that n is good
  assert(n == n_con);
  // check that there are no network constraints
  assert(nlnc == 0 && lnc == 0);
  // the first nlc constraints are nonlinear; the rest are linear
  int i;
  for (i = 0; i < nlc; i++) {
    const_types[i] = 1;
  }
  // the rest is linear
  for (i = nlc; i < n_con; i++)
    const_types[i] = 0;
  return true;
}
static bool internal_objval(CbcAmplInfo *info, double &obj_val)
{
  ASL_pfgh *asl = info->asl_;
  info->objval_called_with_current_x_ = false; // in case the call below fails

  if (n_obj == 0) {
    obj_val = 0;
    info->objval_called_with_current_x_ = true;
    return true;
  } else {
    double retval = objval(0, info->non_const_x_, (fint *)&info->nerror_);
    if (!info->nerror_) {
      obj_val = info->obj_sign_ * retval;
      info->objval_called_with_current_x_ = true;
      return true;
    } else {
      abort();
    }
  }

  return false;
}

static bool internal_conval(CbcAmplInfo *info, double *g)
{
  ASL_pfgh *asl = info->asl_;
  info->conval_called_with_current_x_ = false; // in case the call below fails
  assert(g);

  conval(info->non_const_x_, g, (fint *)&info->nerror_);

  if (!info->nerror_) {
    info->conval_called_with_current_x_ = true;
    return true;
  } else {
    abort();
  }
  return false;
}

static bool apply_new_x(CbcAmplInfo *info, bool new_x, int n, const double *x)
{
  ASL_pfgh *asl = info->asl_;

  if (new_x) {
    // update the flags so these methods are called
    // before evaluating the hessian
    info->conval_called_with_current_x_ = false;
    info->objval_called_with_current_x_ = false;
    info->jacval_called_with_current_x_ = false;

    //copy the data to the non_const_x_
    if (!info->non_const_x_) {
      info->non_const_x_ = new double[n];
    }

    for (int i = 0; i < n; i++) {
      info->non_const_x_[i] = x[i];
    }

    // tell ampl that we have a new x
    xknowne(info->non_const_x_, (fint *)&info->nerror_);
    return info->nerror_ ? false : true;
  }

  return true;
}

static bool eval_f(void *amplInfo, int n, const double *x, bool new_x, double &obj_value)
{
  CbcAmplInfo *info = (CbcAmplInfo *)amplInfo;
  if (!apply_new_x(info, new_x, n, x)) {
    return false;
  }

  return internal_objval(info, obj_value);
}

static bool eval_grad_f(void *amplInfo, int n, const double *x, bool new_x, double *grad_f)
{
  CbcAmplInfo *info = (CbcAmplInfo *)amplInfo;
  ASL_pfgh *asl = info->asl_;
  if (!apply_new_x(info, new_x, n, x)) {
    return false;
  }
  int i;

  if (n_obj == 0) {
    for (i = 0; i < n; i++) {
      grad_f[i] = 0.;
    }
  } else {
    objgrd(0, info->non_const_x_, grad_f, (fint *)&info->nerror_);
    if (info->nerror_) {
      return false;
    }

    if (info->obj_sign_ == -1) {
      for (i = 0; i < n; i++) {
        grad_f[i] = -grad_f[i];
      }
    }
  }
  return true;
}

static bool eval_g(void *amplInfo, int n, const double *x, bool new_x, double *g)
{
  CbcAmplInfo *info = (CbcAmplInfo *)amplInfo;
#ifndef NDEBUG
  ASL_pfgh *asl = info->asl_;
#endif
  // warning: n_var is a macro that assumes we have a variable called asl
  assert(n == n_var);

  if (!apply_new_x(info, new_x, n, x)) {
    return false;
  }

  return internal_conval(info, g);
}

static bool eval_jac_g(void *amplInfo, int n, const double *x, bool new_x,
  double *values)
{
  CbcAmplInfo *info = (CbcAmplInfo *)amplInfo;
  ASL_pfgh *asl = info->asl_;
  assert(n == n_var);

  assert(values);
  if (!apply_new_x(info, new_x, n, x)) {
    return false;
  }

  jacval(info->non_const_x_, values, (fint *)&info->nerror_);
  if (!info->nerror_) {
    return true;
  } else {
    abort();
  }
  return false;
}
//-------------------------------------------------------------------
// Useful Constructor
//-------------------------------------------------------------------
ClpAmplObjective::ClpAmplObjective(void *amplInfo)
  : ClpObjective()
{
  type_ = 12;
  activated_ = 1;
  gradient_ = NULL;
  objective_ = NULL;
  offset_ = 0.0;
  amplObjective_ = amplInfo;
}

//-------------------------------------------------------------------
// Copy constructor
//-------------------------------------------------------------------
ClpAmplObjective::ClpAmplObjective(const ClpAmplObjective &rhs)
  : ClpObjective(rhs)
{
  amplObjective_ = rhs.amplObjective_;
  offset_ = rhs.offset_;
  type_ = rhs.type_;
  if (!amplObjective_) {
    objective_ = NULL;
    gradient_ = NULL;
  } else {
    CbcAmplInfo *info = (CbcAmplInfo *)amplObjective_;
    ASL_pfgh *asl = info->asl_;

    int numberColumns = n_var;
    if (rhs.objective_) {
      objective_ = new double[numberColumns];
      memcpy(objective_, rhs.objective_, numberColumns * sizeof(double));
    } else {
      objective_ = NULL;
    }
    if (rhs.gradient_) {
      gradient_ = new double[numberColumns];
      memcpy(gradient_, rhs.gradient_, numberColumns * sizeof(double));
    } else {
      gradient_ = NULL;
    }
  }
}

//-------------------------------------------------------------------
// Destructor
//-------------------------------------------------------------------
ClpAmplObjective::~ClpAmplObjective()
{
  delete[] objective_;
  delete[] gradient_;
}

//----------------------------------------------------------------
// Assignment operator
//-------------------------------------------------------------------
ClpAmplObjective &
ClpAmplObjective::operator=(const ClpAmplObjective &rhs)
{
  if (this != &rhs) {
    delete[] objective_;
    delete[] gradient_;
    amplObjective_ = rhs.amplObjective_;
    offset_ = rhs.offset_;
    type_ = rhs.type_;
    if (!amplObjective_) {
      objective_ = NULL;
      gradient_ = NULL;
    } else {
      CbcAmplInfo *info = (CbcAmplInfo *)amplObjective_;
      ASL_pfgh *asl = info->asl_;

      int numberColumns = n_var;
      if (rhs.objective_) {
        objective_ = new double[numberColumns];
        memcpy(objective_, rhs.objective_, numberColumns * sizeof(double));
      } else {
        objective_ = NULL;
      }
      if (rhs.gradient_) {
        gradient_ = new double[numberColumns];
        memcpy(gradient_, rhs.gradient_, numberColumns * sizeof(double));
      } else {
        gradient_ = NULL;
      }
    }
  }
  return *this;
}

// Returns gradient
double *
ClpAmplObjective::gradient(const ClpSimplex *model,
  const double *solution, double &offset, bool refresh,
  int includeLinear)
{
  if (model)
    assert(model->optimizationDirection() == 1.0);
#ifndef NDEBUG
  bool scaling = model && (model->rowScale() || model->objectiveScale() != 1.0 || model->optimizationDirection() != 1.0);
#endif
  const double *cost = NULL;
  if (model)
    cost = model->costRegion();
  if (!cost) {
    // not in solve
    cost = objective_;
#ifndef NDEBUG
    scaling = false;
#endif
  }
  assert(!scaling);
  if (!amplObjective_ || !solution || !activated_) {
    offset = offset_;
    return objective_;
  } else {
    if (refresh || !gradient_) {
      CbcAmplInfo *info = (CbcAmplInfo *)amplObjective_;
      ASL_pfgh *asl = info->asl_;
      int numberColumns = n_var;

      if (!gradient_)
        gradient_ = new double[numberColumns];
      assert(solution);
      eval_grad_f(amplObjective_, numberColumns, solution, true, gradient_);
      // Is this best way?
      double objValue = 0.0;
      eval_f(amplObjective_, numberColumns, solution, false, objValue);
      double objValue2 = 0.0;
      for (int i = 0; i < numberColumns; i++)
        objValue2 += gradient_[i] * solution[i];
      offset_ = objValue2 - objValue; // or other way???
      if (model && model->optimizationDirection() != 1.0) {
        offset *= model->optimizationDirection();
        for (int i = 0; i < numberColumns; i++)
          gradient_[i] *= -1.0;
      }
    }
    offset = offset_;
    return gradient_;
  }
}
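
/* Note on the offset computed above (explanatory, added): with g = grad f(x)
   evaluated at the current solution x, offset_ is set to g.x - f(x), so the
   linear form g.x - offset_ reproduces the true objective value exactly at x;
   away from x it is the first-order approximation of the AMPL objective.  The
   "or other way???" comment above flags the unresolved sign convention for
   how callers are meant to apply the offset. */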

//-------------------------------------------------------------------
// Clone
//-------------------------------------------------------------------
ClpObjective *ClpAmplObjective::clone() const
{
  return new ClpAmplObjective(*this);
}
// Resize objective
void ClpAmplObjective::resize(int newNumberColumns)
{
  CbcAmplInfo *info = (CbcAmplInfo *)amplObjective_;
  ASL_pfgh *asl = info->asl_;
  int numberColumns = n_var;
  if (numberColumns != newNumberColumns) {
    abort();
  }
}
// Delete columns in objective
void ClpAmplObjective::deleteSome(int numberToDelete, const int *which)
{
  if (numberToDelete)
    abort();
}
/* Returns reduced gradient. Returns an offset (to be added to current one).
 */
double
ClpAmplObjective::reducedGradient(ClpSimplex *model, double *region,
  bool useFeasibleCosts)
{
  int numberRows = model->numberRows();
  int numberColumns = model->numberColumns();

  //work space
  CoinIndexedVector *workSpace = model->rowArray(0);

  CoinIndexedVector arrayVector;
  arrayVector.reserve(numberRows + 1);

  int iRow;
#ifdef CLP_DEBUG
  workSpace->checkClear();
#endif
  double *array = arrayVector.denseVector();
  int *index = arrayVector.getIndices();
  int number = 0;
  const double *costNow = gradient(model, model->solutionRegion(), offset_,
    true, useFeasibleCosts ? 2 : 1);
  double *cost = model->costRegion();
  const int *pivotVariable = model->pivotVariable();
  for (iRow = 0; iRow < numberRows; iRow++) {
    int iPivot = pivotVariable[iRow];
    double value;
    if (iPivot < numberColumns)
      value = costNow[iPivot];
    else if (!useFeasibleCosts)
      value = cost[iPivot];
    else
      value = 0.0;
    if (value) {
      array[iRow] = value;
      index[number++] = iRow;
    }
  }
  arrayVector.setNumElements(number);

  // Btran basic costs
  model->factorization()->updateColumnTranspose(workSpace, &arrayVector);
  double *work = workSpace->denseVector();
  ClpFillN(work, numberRows, 0.0);
  // now look at dual solution
  double *rowReducedCost = region + numberColumns;
  double *dual = rowReducedCost;
  const double *rowCost = cost + numberColumns;
  for (iRow = 0; iRow < numberRows; iRow++) {
    dual[iRow] = array[iRow];
  }
  double *dj = region;
  ClpDisjointCopyN(costNow, numberColumns, dj);

  model->transposeTimes(-1.0, dual, dj);
  for (iRow = 0; iRow < numberRows; iRow++) {
    // slack
    double value = dual[iRow];
    value += rowCost[iRow];
    rowReducedCost[iRow] = value;
  }
  return offset_;
}
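
/* In matrix terms (explanatory note, added): the loop above loads the costs
   of the basic variables into arrayVector, updateColumnTranspose() performs
   the btran solve B^T y = c_B with B the current basis, and the final block
   forms the reduced costs
       d_j     = c_j - a_j^T y   (structural columns, via transposeTimes)
       d_slack = c_row + y       (row/slack part)
   which are returned in region[0 .. numberColumns+numberRows-1]. */
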
/* Returns step length which gives minimum of objective for
   solution + theta * change vector up to maximum theta.

   arrays are numberColumns+numberRows
*/
double
ClpAmplObjective::stepLength(ClpSimplex *model,
  const double *solution,
  const double *change,
  double maximumTheta,
  double &currentObj,
  double &predictedObj,
  double &thetaObj)
{
  // Assume convex
  CbcAmplInfo *info = (CbcAmplInfo *)amplObjective_;
  ASL_pfgh *asl = info->asl_;

  int numberColumns = n_var;
  double *tempSolution = new double[numberColumns];
  double *tempGradient = new double[numberColumns];
  // current
  eval_f(amplObjective_, numberColumns, solution, true, currentObj);
  double objA = currentObj;
  double thetaA = 0.0;
  // at maximum
  int i;
  for (i = 0; i < numberColumns; i++)
    tempSolution[i] = solution[i] + maximumTheta * change[i];
  eval_f(amplObjective_, numberColumns, tempSolution, true, thetaObj);
  double objC = thetaObj;
  double thetaC = maximumTheta;
  double objB = 0.5 * (objA + objC);
  double thetaB = 0.5 * maximumTheta;
  double gradientNorm = 1.0e6;
  while (gradientNorm > 1.0e-6 && thetaC - thetaA > 1.0e-8) {
    for (i = 0; i < numberColumns; i++)
      tempSolution[i] = solution[i] + thetaB * change[i];
    eval_grad_f(amplObjective_, numberColumns, tempSolution, true, tempGradient);
    eval_f(amplObjective_, numberColumns, tempSolution, false, objB);
    double changeObj = 0.0;
    gradientNorm = 0.0;
    for (i = 0; i < numberColumns; i++) {
      changeObj += tempGradient[i] * change[i];
      gradientNorm += tempGradient[i] * tempGradient[i];
    }
    gradientNorm = fabs(changeObj) / sqrt(gradientNorm);
    // Should try and get quadratic convergence by interpolation
    if (changeObj < 0.0) {
      // increasing is good
      thetaA = thetaB;
    } else {
      // decreasing is good
      thetaC = thetaB;
    }
    thetaB = 0.5 * (thetaA + thetaC);
  }
  delete[] tempSolution;
  delete[] tempGradient;
  predictedObj = objB;
  return thetaB;
}
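
/* Sketch of the search above (explanatory, added): it minimises
   f(solution + theta * change) for theta in [0, maximumTheta] by bisection on
   the sign of the directional derivative
       g(theta) = grad f(solution + theta * change) . change,
   shrinking the bracket [thetaA, thetaC] around a sign change of g and
   stopping once |g| / ||grad f|| <= 1.0e-6 or the bracket is narrower than
   1.0e-8.  predictedObj is the objective value at the last evaluated
   midpoint. */
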
// Return objective value (without any ClpModel offset) (model may be NULL)
double
ClpAmplObjective::objectiveValue(const ClpSimplex *model, const double *solution) const
{
  CbcAmplInfo *info = (CbcAmplInfo *)amplObjective_;
  ASL_pfgh *asl = info->asl_;

  int numberColumns = n_var;
  // current
  double currentObj = 0.0;
  eval_f(amplObjective_, numberColumns, solution, true, currentObj);
  return currentObj;
}
// Scale objective
void ClpAmplObjective::reallyScale(const double *columnScale)
{
  abort();
}
/* Given a zeroed array sets nonlinear columns to 1.
   Returns number of nonlinear columns
*/
int ClpAmplObjective::markNonlinear(char *which)
{
  int iColumn;
  CbcAmplInfo *info = (CbcAmplInfo *)amplObjective_;
  ASL_pfgh *asl = info->asl_;
  int nonLinear = CoinMax(nlvc, nlvo);
  for (iColumn = 0; iColumn < nonLinear; iColumn++) {
    which[iColumn] = 1;
  }
  int numberNonLinearColumns = 0;
  int numberColumns = n_var;
  for (iColumn = 0; iColumn < numberColumns; iColumn++) {
    if (which[iColumn])
      numberNonLinearColumns++;
  }
  return numberNonLinearColumns;
}
// Say we have new primal solution - so may need to recompute
void ClpAmplObjective::newXValues()
{
  CbcAmplInfo *info = (CbcAmplInfo *)amplObjective_;
  info->conval_called_with_current_x_ = false;
  info->objval_called_with_current_x_ = false;
  info->jacval_called_with_current_x_ = false;
}

//#############################################################################
// Constructors / Destructor / Assignment
//#############################################################################
//-------------------------------------------------------------------
// Default Constructor
//-------------------------------------------------------------------
ClpConstraintAmpl::ClpConstraintAmpl()
  : ClpConstraint()
{
  type_ = 3;
  column_ = NULL;
  coefficient_ = NULL;
  numberCoefficients_ = 0;
  amplInfo_ = NULL;
}

//-------------------------------------------------------------------
// Useful Constructor
//-------------------------------------------------------------------
ClpConstraintAmpl::ClpConstraintAmpl(int row, void *amplInfo)
  : ClpConstraint()
{
  type_ = 3;
  rowNumber_ = row;
  amplInfo_ = amplInfo;
  CbcAmplInfo *info = (CbcAmplInfo *)amplInfo_;
#ifndef NDEBUG
  ASL_pfgh *asl = info->asl_;
#endif
  // warning: nlc is a macro that assumes we have a variable called asl
  assert(rowNumber_ < nlc);
  numberCoefficients_ = info->rowStart_[rowNumber_ + 1] - info->rowStart_[rowNumber_];
  column_ = CoinCopyOfArray(info->column_ + info->rowStart_[rowNumber_], numberCoefficients_);
  coefficient_ = new double[numberCoefficients_];
}

//-------------------------------------------------------------------
// Copy constructor
//-------------------------------------------------------------------
ClpConstraintAmpl::ClpConstraintAmpl(const ClpConstraintAmpl &rhs)
  : ClpConstraint(rhs)
{
  numberCoefficients_ = rhs.numberCoefficients_;
  column_ = CoinCopyOfArray(rhs.column_, numberCoefficients_);
  coefficient_ = CoinCopyOfArray(rhs.coefficient_, numberCoefficients_);
}

//-------------------------------------------------------------------
// Destructor
//-------------------------------------------------------------------
ClpConstraintAmpl::~ClpConstraintAmpl()
{
  delete[] column_;
  delete[] coefficient_;
}

//----------------------------------------------------------------
// Assignment operator
//-------------------------------------------------------------------
ClpConstraintAmpl &
ClpConstraintAmpl::operator=(const ClpConstraintAmpl &rhs)
{
  if (this != &rhs) {
    delete[] column_;
    delete[] coefficient_;
    numberCoefficients_ = rhs.numberCoefficients_;
    column_ = CoinCopyOfArray(rhs.column_, numberCoefficients_);
    coefficient_ = CoinCopyOfArray(rhs.coefficient_, numberCoefficients_);
  }
  return *this;
}
//-------------------------------------------------------------------
// Clone
//-------------------------------------------------------------------
ClpConstraint *ClpConstraintAmpl::clone() const
{
  return new ClpConstraintAmpl(*this);
}

// Returns gradient
int ClpConstraintAmpl::gradient(const ClpSimplex *model,
  const double *solution,
  double *gradient,
  double &functionValue,
  double &offset,
  bool useScaling,
  bool refresh) const
{
  CbcAmplInfo *info = (CbcAmplInfo *)amplInfo_;
  ASL_pfgh *asl = info->asl_;
  int numberColumns = n_var;
  // If not done then do all
  if (!info->jacval_called_with_current_x_) {
    bool getStuff = eval_g(amplInfo_, numberColumns, solution, true, info->constraintValues_);
    assert(getStuff);
    getStuff = eval_jac_g(amplInfo_, numberColumns, solution, false, info->gradient_);
    assert(getStuff);
    info->jacval_called_with_current_x_ = getStuff;
  }
  if (refresh || !lastGradient_) {
    functionValue_ = info->constraintValues_[rowNumber_];
    offset_ = functionValue_; // sign??
    if (!lastGradient_)
      lastGradient_ = new double[numberColumns];
    CoinZeroN(lastGradient_, numberColumns);
    assert(!(model && model->rowScale() && useScaling));
    int i;
    int start = info->rowStart_[rowNumber_];
    assert(numberCoefficients_ == info->rowStart_[rowNumber_ + 1] - start);
    for (i = 0; i < numberCoefficients_; i++) {
      int iColumn = column_[i];
      double valueS = solution[iColumn];
      double valueG = info->gradient_[start + i];
      lastGradient_[iColumn] = valueG;
      offset_ -= valueS * valueG;
    }
  }
  functionValue = functionValue_;
  offset = offset_;
  memcpy(gradient, lastGradient_, numberColumns * sizeof(double));
  return 0;
}
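
/* Note on offset_ above (explanatory, added): for constraint row i with value
   c_i(x) and gradient g_i(x) at the current solution x, the loop sets
       offset_ = c_i(x) - g_i(x) . x,
   i.e. the constant term that makes the linearisation g_i(x) . xhat + offset_
   equal to c_i(xhat) at xhat = x.  The "sign??" comment above records the
   same open question as in the objective case. */
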
// Resize constraint
void ClpConstraintAmpl::resize(int newNumberColumns)
{
  abort();
}
// Delete columns in constraint
void ClpConstraintAmpl::deleteSome(int numberToDelete, const int *which)
{
  if (numberToDelete) {
    abort();
  }
}
// Scale constraint
void ClpConstraintAmpl::reallyScale(const double *columnScale)
{
  abort();
}
/* Given a zeroed array sets nonlinear columns to 1.
   Returns number of nonlinear columns
*/
int ClpConstraintAmpl::markNonlinear(char *which) const
{
  CbcAmplInfo *info = (CbcAmplInfo *)amplInfo_;
  ASL_pfgh *asl = info->asl_;
  int iColumn;
  int numberNon = 0;
  int nonLinear = CoinMax(nlvc, nlvo);
  for (iColumn = 0; iColumn < numberCoefficients_; iColumn++) {
    int jColumn = column_[iColumn];
    if (jColumn < nonLinear) {
      which[jColumn] = 1;
      numberNon++;
    }
  }
  return numberNon;
}
/* Given a zeroed array sets possible nonzero coefficients to 1.
   Returns number of nonzeros
*/
int ClpConstraintAmpl::markNonzero(char *which) const
{
  int iColumn;
  for (iColumn = 0; iColumn < numberCoefficients_; iColumn++) {
    which[column_[iColumn]] = 1;
  }
  return numberCoefficients_;
}
// Number of coefficients
int ClpConstraintAmpl::numberCoefficients() const
{
  return numberCoefficients_;
}
// Say we have new primal solution - so may need to recompute
void ClpConstraintAmpl::newXValues()
{
  CbcAmplInfo *info = (CbcAmplInfo *)amplInfo_;
  info->conval_called_with_current_x_ = false;
  info->objval_called_with_current_x_ = false;
  info->jacval_called_with_current_x_ = false;
}

/* Load nonlinear part of problem from AMPL info
   Returns 0 if linear
   1 if quadratic objective
   2 if quadratic constraints
   3 if nonlinear objective
   4 if nonlinear constraints
   -1 on failure
*/
int ClpSimplex::loadNonLinear(void *amplInfo, int &numberConstraints,
  ClpConstraint **&constraints)
{
  numberConstraints = 0;
  constraints = NULL;
  CbcAmplInfo *info = (CbcAmplInfo *)amplInfo;
  ASL_pfgh *asl = info->asl_;
  // For moment don't say quadratic
  int type = 0;
  if (nlo + nlc) {
    // nonlinear
    if (!nlc) {
      type = 3;
      delete objective_;
      objective_ = new ClpAmplObjective(amplInfo);
    } else {
      type = 4;
      numberConstraints = nlc;
      constraints = new ClpConstraint *[numberConstraints];
      if (nlo) {
        delete objective_;
        objective_ = new ClpAmplObjective(amplInfo);
      }
      for (int i = 0; i < numberConstraints; i++) {
        constraints[i] = new ClpConstraintAmpl(i, amplInfo);
      }
    }
  }
  return type;
}
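
/* Illustrative sketch (added; an assumption, not part of the build): how a
   caller such as CbcLinked might use the hook above.  `amplInfo` is the
   CbcAmplInfo built by the .nl reader; all names here are hypothetical.

     ClpSimplex model;
     int numberConstraints = 0;
     ClpConstraint **constraints = NULL;
     int type = model.loadNonLinear(amplInfo, numberConstraints, constraints);
     if (type == 3 || type == 4) {
       // a ClpAmplObjective and/or ClpConstraintAmpl objects were installed;
       // constraints[0 .. numberConstraints-1] are presumably owned by the
       // caller and must eventually be deleted along with the array itself.
     }
*/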
#else
#include "ClpSimplex.hpp"
#include "ClpConstraint.hpp"
int ClpSimplex::loadNonLinear(void *, int &,
  ClpConstraint **&)
{
  abort();
  return 0;
}
#endif

/* vi: softtabstop=2 shiftwidth=2 expandtab tabstop=2
*/