source: trunk/Cbc/src/CbcLinkedUtils.cpp @ 1424

Last change on this file since 1424 was 1401, checked in by lou, 10 years ago

A final commit to make it clear that CbcSolver is an unused class in the
current code. See comments in CbcSolver.hpp.

// Copyright (C) 2007, International Business Machines
// Corporation and others.  All Rights Reserved.
/* $Id$ */

/*! \file CbcLinkedUtils.cpp
    \brief Hooks to Ampl (for CbcLinked)

    This code is a condensation of ClpAmplStuff.cpp, renamed to better
    reflect its current place in cbc.

  The code here had ties to NEW_STYLE_SOLVER code. During the 091209 Watson
  meeting, NEW_STYLE_SOLVER code was eliminated. The code here was condensed
  from ClpAmplStuff.cpp. The hook into CbcLinked is loadNonLinear. Once you
  bring that in, all the rest follows. Still, we're down about 400 lines of
  code. In the process, it appears that ClpAmplObjective.cpp was never needed
  here; the code was hooked into ClpAmplStuff.cpp.  --lh, 091209 --
*/
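
/* A minimal usage sketch of that hook (illustrative only; the surrounding
   driver code lives in the Ampl interface / CbcLinked, and amplInfo stands
   for the CbcAmplInfo built there from the ASL problem handle):

       ClpSimplex lp;
       // ... load the linear part of the problem into lp ...
       int numberConstraints = 0;
       ClpConstraint ** constraints = NULL;
       int type = lp.loadNonLinear(amplInfo, numberConstraints, constraints);
       // type: 0 = linear, 3 = nonlinear objective, 4 = nonlinear constraints
       // CbcLinked can then work with lp and constraints[0..numberConstraints-1].
*/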

#include "ClpConfig.h"
#include "CbcConfig.h"
#ifdef COIN_HAS_ASL
#include "CoinPragma.hpp"
#include "CoinHelperFunctions.hpp"
#include "CoinIndexedVector.hpp"
#include "ClpFactorization.hpp"
#include "ClpSimplex.hpp"
#include "ClpAmplObjective.hpp"
#include "ClpConstraintAmpl.hpp"
#include "ClpMessage.hpp"
#include "CoinUtilsConfig.h"
#include "CoinWarmStartBasis.hpp"
#include "OsiSolverInterface.hpp"
#include "Cbc_ampl.h"
#include "CoinTime.hpp"
#include "CglStored.hpp"
#include "CoinModel.hpp"
#include "CbcLinked.hpp"

extern "C" {
    //# include "getstub.h"
# include "asl_pfgh.h"
}

// stolen from Ipopt with changes
typedef struct {
    double obj_sign_;           // multiplier applied to the AMPL objective; -1 flips the sense (see eval_grad_f)
    ASL_pfgh * asl_;            // AMPL Solver Library problem handle
    double * non_const_x_;      // writable copy of the current point x (see apply_new_x)
    int * column_;              // Jacobian column indices, stored row by row
    int * rowStart_;            // start of each constraint row in column_ / gradient_
    double * gradient_;         // Jacobian values, parallel to column_
    double * constraintValues_; // cached constraint values g(x)
    int nz_h_full_;             // number of nonzeros in hessian
    int nerror_;                // ASL error flag from the last evaluation
    bool objval_called_with_current_x_;
    bool conval_called_with_current_x_;
    bool jacval_called_with_current_x_;
} CbcAmplInfo;
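
/* Sketch of the Jacobian storage implied by the fields above (numbers are
   made up): the entries of constraint row i live in positions
   rowStart_[i] .. rowStart_[i+1]-1 of column_ and gradient_.

       rowStart_ = {0, 2, 5}
       column_   = {0, 3,   1, 2, 4}
       gradient_ = {2.0, -1.0,   0.5, 3.0, 1.0}

   Row 0 has nonzeros in columns 0 and 3, row 1 in columns 1, 2 and 4;
   ClpConstraintAmpl::gradient() below walks exactly this layout.
*/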

//#############################################################################
// Constructors / Destructor / Assignment
//#############################################################################

//-------------------------------------------------------------------
// Default Constructor
//-------------------------------------------------------------------
ClpAmplObjective::ClpAmplObjective ()
        : ClpObjective()
{
    type_ = 12;
    objective_ = NULL;
    amplObjective_ = NULL;
    gradient_ = NULL;
    offset_ = 0.0;
}

bool get_constraints_linearity(void * amplInfo, int  n,
                               int * const_types)
{
    CbcAmplInfo * info = (CbcAmplInfo *) amplInfo;
    ASL_pfgh* asl = info->asl_;
    // check that n is good
    assert(n == n_con);
    // check that there are no network constraints
    assert(nlnc == 0 && lnc == 0);
    // the first nlc constraints are nonlinear; the rest are linear
    int i;
    for (i = 0; i < nlc; i++) {
        const_types[i] = 1;
    }
    // the rest are linear
    for (i = nlc; i < n_con; i++)
        const_types[i] = 0;
    return true;
}
static bool internal_objval(CbcAmplInfo * info , double & obj_val)
{
    ASL_pfgh* asl = info->asl_;
    info->objval_called_with_current_x_ = false; // in case the call below fails

    if (n_obj == 0) {
        obj_val = 0;
        info->objval_called_with_current_x_ = true;
        return true;
    }  else {
        double  retval = objval(0, info->non_const_x_, (fint*)info->nerror_);
        if (!info->nerror_) {
            obj_val = info->obj_sign_ * retval;
            info->objval_called_with_current_x_ = true;
            return true;
        } else {
            abort();
        }
    }

    return false;
}

static bool internal_conval(CbcAmplInfo * info , double * g)
{
    ASL_pfgh* asl = info->asl_;
    info->conval_called_with_current_x_ = false; // in case the call below fails
    assert (g);

    conval(info->non_const_x_, g, (fint*)info->nerror_);

    if (!info->nerror_) {
        info->conval_called_with_current_x_ = true;
        return true;
    } else {
        abort();
    }
    return false;
}

static bool apply_new_x(CbcAmplInfo * info  , bool new_x, int  n, const double * x)
{
    ASL_pfgh* asl = info->asl_;

    if (new_x) {
        // update the flags so these methods are called
        // before evaluating the hessian
        info->conval_called_with_current_x_ = false;
        info->objval_called_with_current_x_ = false;
        info->jacval_called_with_current_x_ = false;

        // copy the data to non_const_x_
        if (!info->non_const_x_) {
            info->non_const_x_ = new double [n];
        }

        for (int  i = 0; i < n; i++) {
            info->non_const_x_[i] = x[i];
        }

        // tell ampl that we have a new x
        xknowne(info->non_const_x_, (fint*)info->nerror_);
        return info->nerror_ ? false : true;
    }

    return true;
}
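
/* Typical calling pattern (a sketch mirroring ClpAmplObjective::gradient()
   further down): the first evaluation at a point passes new_x = true so that
   apply_new_x() caches x and resets the *_called_with_current_x_ flags;
   later evaluations at the same point pass new_x = false and reuse the cache.

       double f;
       double * g = new double[n];
       eval_grad_f(amplInfo, n, x, true, g);    // caches x, evaluates gradient
       eval_f(amplInfo, n, x, false, f);        // same point, no re-copy
       delete [] g;
*/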

static bool eval_f(void * amplInfo, int  n, const double * x, bool new_x, double & obj_value)
{
    CbcAmplInfo * info = (CbcAmplInfo *) amplInfo;
    if (!apply_new_x(info, new_x, n, x)) {
        return false;
    }

    return internal_objval(info, obj_value);
}

static bool eval_grad_f(void * amplInfo, int  n, const double * x, bool new_x, double * grad_f)
{
    CbcAmplInfo * info = (CbcAmplInfo *) amplInfo;
    ASL_pfgh* asl = info->asl_;
    if (!apply_new_x(info, new_x, n, x)) {
        return false;
    }
    int i;

    if (n_obj == 0) {
        for (i = 0; i < n; i++) {
            grad_f[i] = 0.;
        }
    } else {
        objgrd(0, info->non_const_x_, grad_f, (fint*)info->nerror_);
        if (info->nerror_) {
            return false;
        }

        if (info->obj_sign_ == -1) {
            for (i = 0; i < n; i++) {
                grad_f[i] = -grad_f[i];
            }
        }
    }
    return true;
}

static bool eval_g(void * amplInfo, int  n, const double * x, bool new_x, double * g)
{
    CbcAmplInfo * info = (CbcAmplInfo *) amplInfo;
#ifndef NDEBUG
    ASL_pfgh* asl = info->asl_;
#endif
    // warning: n_var is a macro that assumes we have a variable called asl
    assert(n == n_var);

    if (!apply_new_x(info, new_x, n, x)) {
        return false;
    }

    return internal_conval(info, g);
}

static bool eval_jac_g(void * amplInfo, int  n, const double * x, bool new_x,
                       double * values)
{
    CbcAmplInfo * info = (CbcAmplInfo *) amplInfo;
    ASL_pfgh* asl = info->asl_;
    assert(n == n_var);

    assert (values);
    if (!apply_new_x(info, new_x, n, x)) {
        return false;
    }

    jacval(info->non_const_x_, values, (fint*)info->nerror_);
    if (!info->nerror_) {
        return true;
    } else {
        abort();
    }
    return false;
}
//-------------------------------------------------------------------
// Useful Constructor
//-------------------------------------------------------------------
ClpAmplObjective::ClpAmplObjective (void * amplInfo)
        : ClpObjective()
{
    type_ = 12;
    activated_ = 1;
    gradient_ = NULL;
    objective_ = NULL;
    offset_ = 0.0;
    amplObjective_ = amplInfo;
}

//-------------------------------------------------------------------
// Copy constructor
//-------------------------------------------------------------------
ClpAmplObjective::ClpAmplObjective (const ClpAmplObjective & rhs)
        : ClpObjective(rhs)
{
    amplObjective_ = rhs.amplObjective_;
    offset_ = rhs.offset_;
    type_ = rhs.type_;
    if (!amplObjective_) {
        objective_ = NULL;
        gradient_ = NULL;
    } else {
        CbcAmplInfo * info = (CbcAmplInfo *) amplObjective_;
        ASL_pfgh* asl = info->asl_;

        int numberColumns = n_var;
        if (rhs.objective_) {
            objective_ = new double [numberColumns];
            memcpy(objective_, rhs.objective_, numberColumns*sizeof(double));
        } else {
            objective_ = NULL;
        }
        if (rhs.gradient_) {
            gradient_ = new double [numberColumns];
            memcpy(gradient_, rhs.gradient_, numberColumns*sizeof(double));
        } else {
            gradient_ = NULL;
        }
    }
}


//-------------------------------------------------------------------
// Destructor
//-------------------------------------------------------------------
ClpAmplObjective::~ClpAmplObjective ()
{
    delete [] objective_;
    delete [] gradient_;
}

//----------------------------------------------------------------
// Assignment operator
//-------------------------------------------------------------------
ClpAmplObjective &
ClpAmplObjective::operator=(const ClpAmplObjective & rhs)
{
    if (this != &rhs) {
        delete [] objective_;
        delete [] gradient_;
        amplObjective_ = rhs.amplObjective_;
        offset_ = rhs.offset_;
        type_ = rhs.type_;
        if (!amplObjective_) {
            objective_ = NULL;
            gradient_ = NULL;
        } else {
            CbcAmplInfo * info = (CbcAmplInfo *) amplObjective_;
            ASL_pfgh* asl = info->asl_;

            int numberColumns = n_var;
            if (rhs.objective_) {
                objective_ = new double [numberColumns];
                memcpy(objective_, rhs.objective_, numberColumns*sizeof(double));
            } else {
                objective_ = NULL;
            }
            if (rhs.gradient_) {
                gradient_ = new double [numberColumns];
                memcpy(gradient_, rhs.gradient_, numberColumns*sizeof(double));
            } else {
                gradient_ = NULL;
            }
        }
    }
    return *this;
}

// Returns gradient
double *
ClpAmplObjective::gradient(const ClpSimplex * model,
                           const double * solution, double & offset, bool refresh,
                           int includeLinear)
{
    if (model)
        assert (model->optimizationDirection() == 1.0);
    bool scaling = false;
    if (model && (model->rowScale() ||
                  model->objectiveScale() != 1.0 || model->optimizationDirection() != 1.0))
        scaling = true;
    const double * cost = NULL;
    if (model)
        cost = model->costRegion();
    if (!cost) {
        // not in solve
        cost = objective_;
        scaling = false;
    }
    assert (!scaling);
    if (!amplObjective_ || !solution || !activated_) {
        offset = offset_;
        return objective_;
    } else {
        if (refresh || !gradient_) {
            CbcAmplInfo * info = (CbcAmplInfo *) amplObjective_;
            ASL_pfgh* asl = info->asl_;
            int numberColumns = n_var;

            if (!gradient_)
                gradient_ = new double[numberColumns];
            assert (solution);
            eval_grad_f(amplObjective_, numberColumns, solution, true, gradient_);
            // Is this best way?
            double objValue = 0.0;
            eval_f(amplObjective_, numberColumns, solution, false, objValue);
            double objValue2 = 0.0;
            for (int i = 0; i < numberColumns; i++)
                objValue2 += gradient_[i] * solution[i];
            offset_ = objValue2 - objValue; // or other way???
            if (model && model->optimizationDirection() != 1.0) {
                offset *= model->optimizationDirection();
                for (int i = 0; i < numberColumns; i++)
                    gradient_[i] *= -1.0;
            }
        }
        offset = offset_;
        return gradient_;
    }
}
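/* What the offset computed above encodes: the objective is replaced locally
   by its first-order model  f(x) ~= gradient_ . x - offset_ , exact at the
   point where the gradient was evaluated.  With made-up numbers, f(x) = x*x
   at x = 3 gives gradient_ = 6, objValue = 9, objValue2 = 18, so
   offset_ = 18 - 9 = 9 and indeed 6*3 - 9 = 9 = f(3).
*/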

//-------------------------------------------------------------------
// Clone
//-------------------------------------------------------------------
ClpObjective * ClpAmplObjective::clone() const
{
    return new ClpAmplObjective(*this);
}
// Resize objective
void
ClpAmplObjective::resize(int newNumberColumns)
{
    CbcAmplInfo * info = (CbcAmplInfo *) amplObjective_;
    ASL_pfgh* asl = info->asl_;
    int numberColumns = n_var;
    if (numberColumns != newNumberColumns) {
        abort();
    }

}
// Delete columns in objective
void
ClpAmplObjective::deleteSome(int numberToDelete, const int * which)
{
    if (numberToDelete)
        abort();
}
/* Returns reduced gradient. Returns an offset (to be added to current one).
 */
double
ClpAmplObjective::reducedGradient(ClpSimplex * model, double * region,
                                  bool useFeasibleCosts)
{
    int numberRows = model->numberRows();
    int numberColumns = model->numberColumns();

    // work space
    CoinIndexedVector  * workSpace = model->rowArray(0);

    CoinIndexedVector arrayVector;
    arrayVector.reserve(numberRows + 1);

    int iRow;
#ifdef CLP_DEBUG
    workSpace->checkClear();
#endif
    double * array = arrayVector.denseVector();
    int * index = arrayVector.getIndices();
    int number = 0;
    const double * costNow = gradient(model, model->solutionRegion(), offset_,
                                      true, useFeasibleCosts ? 2 : 1);
    double * cost = model->costRegion();
    const int * pivotVariable = model->pivotVariable();
    for (iRow = 0; iRow < numberRows; iRow++) {
        int iPivot = pivotVariable[iRow];
        double value;
        if (iPivot < numberColumns)
            value = costNow[iPivot];
        else if (!useFeasibleCosts)
            value = cost[iPivot];
        else
            value = 0.0;
        if (value) {
            array[iRow] = value;
            index[number++] = iRow;
        }
    }
    arrayVector.setNumElements(number);

    // Btran basic costs
    model->factorization()->updateColumnTranspose(workSpace, &arrayVector);
    double * work = workSpace->denseVector();
    ClpFillN(work, numberRows, 0.0);
    // now look at dual solution
    double * rowReducedCost = region + numberColumns;
    double * dual = rowReducedCost;
    const double * rowCost = cost + numberColumns;
    for (iRow = 0; iRow < numberRows; iRow++) {
        dual[iRow] = array[iRow];
    }
    double * dj = region;
    ClpDisjointCopyN(costNow, numberColumns, dj);

    model->transposeTimes(-1.0, dual, dj);
    for (iRow = 0; iRow < numberRows; iRow++) {
        // slack
        double value = dual[iRow];
        value += rowCost[iRow];
        rowReducedCost[iRow] = value;
    }
    return offset_;
}
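/* A summary of what the code above computes: the first loop gathers the
   basic components of the current gradient into arrayVector, the BTRAN call
   solves y = B^-T c_B, and then
       region[0 .. numberColumns-1]                        = costNow - A^T y
       region[numberColumns .. numberColumns+numberRows-1] = rowCost + y
   i.e. the usual simplex pricing step applied to the nonlinear gradient.
*/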
/* Returns step length which gives minimum of objective for
   solution + theta * change vector up to maximum theta.

   arrays are numberColumns+numberRows
*/
double
ClpAmplObjective::stepLength(ClpSimplex * model,
                             const double * solution,
                             const double * change,
                             double maximumTheta,
                             double & currentObj,
                             double & predictedObj,
                             double & thetaObj)
{
    // Assume convex
    CbcAmplInfo * info = (CbcAmplInfo *) amplObjective_;
    ASL_pfgh* asl = info->asl_;

    int numberColumns = n_var;
    double * tempSolution = new double [numberColumns];
    double * tempGradient = new double [numberColumns];
    // current
    eval_f(amplObjective_, numberColumns, solution, true, currentObj);
    double objA = currentObj;
    double thetaA = 0.0;
    // at maximum
    int i;
    for (i = 0; i < numberColumns; i++)
        tempSolution[i] = solution[i] + maximumTheta * change[i];
    eval_f(amplObjective_, numberColumns, tempSolution, true, thetaObj);
    double objC = thetaObj;
    double thetaC = maximumTheta;
    double objB = 0.5 * (objA + objC);
    double thetaB = 0.5 * maximumTheta;
    double gradientNorm = 1.0e6;
    while (gradientNorm > 1.0e-6 && thetaC - thetaA > 1.0e-8) {
        for (i = 0; i < numberColumns; i++)
            tempSolution[i] = solution[i] + thetaB * change[i];
        eval_grad_f(amplObjective_, numberColumns, tempSolution, true, tempGradient);
        eval_f(amplObjective_, numberColumns, tempSolution, false, objB);
        double changeObj = 0.0;
        gradientNorm = 0.0;
        for (i = 0; i < numberColumns; i++) {
            changeObj += tempGradient[i] * change[i];
            gradientNorm += tempGradient[i] * tempGradient[i];
        }
        gradientNorm = fabs(changeObj) / sqrt(gradientNorm);
        // Should try and get quadratic convergence by interpolation
        if (changeObj < 0.0) {
            // increasing is good
            thetaA = thetaB;
        } else {
            // decreasing is good
            thetaC = thetaB;
        }
        thetaB = 0.5 * (thetaA + thetaC);
    }
    delete [] tempSolution;
    delete [] tempGradient;
    predictedObj = objB;
    return thetaB;
}
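/* The while loop above is a bisection line search on the directional
   derivative: at the midpoint thetaB it computes
   changeObj = grad f(solution + thetaB*change) . change and keeps the half
   interval that must contain the minimizer (changeObj < 0 means the objective
   is still decreasing along the direction, so thetaA moves up; otherwise
   thetaC moves down).  For a convex objective this converges to the
   minimizing step; interpolation, as the comment notes, would be faster.
*/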
// Return objective value (without any ClpModel offset) (model may be NULL)
double
ClpAmplObjective::objectiveValue(const ClpSimplex * model, const double * solution) const
{
    CbcAmplInfo * info = (CbcAmplInfo *) amplObjective_;
    ASL_pfgh* asl = info->asl_;

    int numberColumns = n_var;
    // current
    double currentObj = 0.0;
    eval_f(amplObjective_, numberColumns, solution, true, currentObj);
    return currentObj;
}
// Scale objective
void
ClpAmplObjective::reallyScale(const double * columnScale)
{
    abort();
}
/* Given a zeroed array sets nonlinear columns to 1.
   Returns number of nonlinear columns
*/
int
ClpAmplObjective::markNonlinear(char * which)
{
    int iColumn;
    CbcAmplInfo * info = (CbcAmplInfo *) amplObjective_;
    ASL_pfgh* asl = info->asl_;
    int nonLinear = CoinMax(nlvc, nlvo);
    for (iColumn = 0; iColumn < nonLinear; iColumn++) {
        which[iColumn] = 1;
    }
    int numberNonLinearColumns = 0;
    int numberColumns = n_var;
    for (iColumn = 0; iColumn < numberColumns; iColumn++) {
        if (which[iColumn])
            numberNonLinearColumns++;
    }
    return numberNonLinearColumns;
}
// Say we have new primal solution - so may need to recompute
void
ClpAmplObjective::newXValues()
{
    CbcAmplInfo * info = (CbcAmplInfo *) amplObjective_;
    info->conval_called_with_current_x_ = false;
    info->objval_called_with_current_x_ = false;
    info->jacval_called_with_current_x_ = false;
}

//#############################################################################
// Constructors / Destructor / Assignment
//#############################################################################
//-------------------------------------------------------------------
// Default Constructor
//-------------------------------------------------------------------
ClpConstraintAmpl::ClpConstraintAmpl ()
        : ClpConstraint()
{
    type_ = 3;
    column_ = NULL;
    coefficient_ = NULL;
    numberCoefficients_ = 0;
    amplInfo_ = NULL;
}

//-------------------------------------------------------------------
// Useful Constructor
//-------------------------------------------------------------------
ClpConstraintAmpl::ClpConstraintAmpl (int row, void * amplInfo)
        : ClpConstraint()
{
    type_ = 3;
    rowNumber_ = row;
    amplInfo_ = amplInfo;
    CbcAmplInfo * info = (CbcAmplInfo *) amplInfo_;
#ifndef NDEBUG
    ASL_pfgh* asl = info->asl_;
#endif
    // warning: nlc is a macro that assumes we have a variable called asl
    assert (rowNumber_ < nlc);
    numberCoefficients_ = info->rowStart_[rowNumber_+1] - info->rowStart_[rowNumber_];
    column_ = CoinCopyOfArray(info->column_ + info->rowStart_[rowNumber_], numberCoefficients_);
    coefficient_ = new double [numberCoefficients_];
}

//-------------------------------------------------------------------
// Copy constructor
//-------------------------------------------------------------------
ClpConstraintAmpl::ClpConstraintAmpl (const ClpConstraintAmpl & rhs)
        : ClpConstraint(rhs)
{
    amplInfo_ = rhs.amplInfo_;
    numberCoefficients_ = rhs.numberCoefficients_;
    column_ = CoinCopyOfArray(rhs.column_, numberCoefficients_);
    coefficient_ = CoinCopyOfArray(rhs.coefficient_, numberCoefficients_);
}


//-------------------------------------------------------------------
// Destructor
//-------------------------------------------------------------------
ClpConstraintAmpl::~ClpConstraintAmpl ()
{
    delete [] column_;
    delete [] coefficient_;
}

//----------------------------------------------------------------
// Assignment operator
//-------------------------------------------------------------------
ClpConstraintAmpl &
ClpConstraintAmpl::operator=(const ClpConstraintAmpl & rhs)
{
    if (this != &rhs) {
        delete [] column_;
        delete [] coefficient_;
        amplInfo_ = rhs.amplInfo_;
        numberCoefficients_ = rhs.numberCoefficients_;
        column_ = CoinCopyOfArray(rhs.column_, numberCoefficients_);
        coefficient_ = CoinCopyOfArray(rhs.coefficient_, numberCoefficients_);
    }
    return *this;
}
//-------------------------------------------------------------------
// Clone
//-------------------------------------------------------------------
ClpConstraint * ClpConstraintAmpl::clone() const
{
    return new ClpConstraintAmpl(*this);
}

// Returns gradient
int
ClpConstraintAmpl::gradient(const ClpSimplex * model,
                            const double * solution,
                            double * gradient,
                            double & functionValue,
                            double & offset,
                            bool useScaling,
                            bool refresh) const
{
    CbcAmplInfo * info = (CbcAmplInfo *) amplInfo_;
    ASL_pfgh* asl = info->asl_;
    int numberColumns = n_var;
    // If not done then do all
    if (!info->jacval_called_with_current_x_) {
        bool getStuff = eval_g(amplInfo_, numberColumns, solution, true, info->constraintValues_);
        assert (getStuff);
        getStuff = eval_jac_g(amplInfo_, numberColumns, solution, false, info->gradient_);
        assert (getStuff);
        info->jacval_called_with_current_x_ = getStuff;
    }
    if (refresh || !lastGradient_) {
        functionValue_ = info->constraintValues_[rowNumber_];
        offset_ = functionValue_; // sign??
        if (!lastGradient_)
            lastGradient_ = new double[numberColumns];
        CoinZeroN(lastGradient_, numberColumns);
        assert (!(model && model->rowScale() && useScaling));
        int i;
        int start = info->rowStart_[rowNumber_];
        assert (numberCoefficients_ == info->rowStart_[rowNumber_+1] - start);
        for (i = 0; i < numberCoefficients_; i++) {
            int iColumn = column_[i];
            double valueS = solution[iColumn];
            double valueG = info->gradient_[start+i];
            lastGradient_[iColumn] = valueG;
            offset_ -= valueS * valueG;
        }
    }
    functionValue = functionValue_;
    offset = offset_;
    memcpy(gradient, lastGradient_, numberColumns*sizeof(double));
    return 0;
}
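/* Here the offset is the constant term of the constraint's first-order model
   g(x) ~= lastGradient_ . x + offset_ , exact at the linearization point.
   With made-up numbers, g(x) = x*x at x = 3 gives functionValue_ = 9,
   lastGradient_ = 6 and offset_ = 9 - 6*3 = -9, so 6*3 + (-9) = 9 = g(3).
   Note the sign convention differs from the objective case above, as the
   "sign??" comment hints.
*/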
// Resize constraint
void
ClpConstraintAmpl::resize(int newNumberColumns)
{
    abort();
}
// Delete columns in constraint
void
ClpConstraintAmpl::deleteSome(int numberToDelete, const int * which)
{
    if (numberToDelete) {
        abort();
    }
}
// Scale constraint
void
ClpConstraintAmpl::reallyScale(const double * columnScale)
{
    abort();
}
/* Given a zeroed array sets nonlinear columns to 1.
   Returns number of nonlinear columns
*/
int
ClpConstraintAmpl::markNonlinear(char * which) const
{
    CbcAmplInfo * info = (CbcAmplInfo *) amplInfo_;
    ASL_pfgh* asl = info->asl_;
    int iColumn;
    int numberNon = 0;
    int nonLinear = CoinMax(nlvc, nlvo);
    for (iColumn = 0; iColumn < numberCoefficients_; iColumn++) {
        int jColumn = column_[iColumn];
        if (jColumn < nonLinear) {
            which[jColumn] = 1;
            numberNon++;
        }
    }
    return numberNon;
}
/* Given a zeroed array sets possible nonzero coefficients to 1.
   Returns number of nonzeros
*/
int
ClpConstraintAmpl::markNonzero(char * which) const
{
    int iColumn;
    for (iColumn = 0; iColumn < numberCoefficients_; iColumn++) {
        which[column_[iColumn]] = 1;
    }
    return numberCoefficients_;
}
// Number of coefficients
int
ClpConstraintAmpl::numberCoefficients() const
{
    return numberCoefficients_;
}
// Say we have new primal solution - so may need to recompute
void
ClpConstraintAmpl::newXValues()
{
    CbcAmplInfo * info = (CbcAmplInfo *) amplInfo_;
    info->conval_called_with_current_x_ = false;
    info->objval_called_with_current_x_ = false;
    info->jacval_called_with_current_x_ = false;
}

/* Load nonlinear part of problem from AMPL info
   Returns 0 if linear
   1 if quadratic objective
   2 if quadratic constraints
   3 if nonlinear objective
   4 if nonlinear constraints
   -1 on failure
*/
int
ClpSimplex::loadNonLinear(void * amplInfo, int & numberConstraints,
                          ClpConstraint ** & constraints)
{
    numberConstraints = 0;
    constraints = NULL;
    CbcAmplInfo * info = (CbcAmplInfo *) amplInfo;
    ASL_pfgh* asl = info->asl_;
    // For the moment don't say quadratic
    int type = 0;
    if (nlo + nlc) {
        // nonlinear
        if (!nlc) {
            type = 3;
            delete objective_;
            objective_ = new ClpAmplObjective(amplInfo);
        } else {
            type = 4;
            numberConstraints = nlc;
            constraints = new ClpConstraint * [numberConstraints];
            if (nlo) {
                delete objective_;
                objective_ = new ClpAmplObjective(amplInfo);
            }
            for (int i = 0; i < numberConstraints; i++) {
                constraints[i] = new ClpConstraintAmpl(i, amplInfo);
            }
        }
    }
    return type;
}
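
/* Caller-side sketch of interpreting the return code (illustrative, not
   copied from CbcLinked):

       int type = lp.loadNonLinear(amplInfo, numberConstraints, constraints);
       if (type == 0) {
           // purely linear - nothing was attached
       } else if (type == 3) {
           // nonlinear objective only
       } else if (type == 4) {
           // nonlinear constraints (and possibly a nonlinear objective too)
       }
       // 1, 2 and -1 are documented above but not currently produced
*/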
#else
#include "ClpSimplex.hpp"
#include "ClpConstraint.hpp"
int
ClpSimplex::loadNonLinear(void * , int & ,
                          ClpConstraint ** & )
{
    abort();
    return 0;
}
#endif