Changeset 517


Ignore:
Timestamp:
Sep 13, 2005 6:51:57 PM (15 years ago)
Author:
andreasw
Message:
  • enabled warm start option for using the same application and also for solving problems with identical structure
  • minor cosmetic changes
Location:
branches/dev
Files:
23 edited

Legend:

Unmodified
Added
Removed
  • branches/dev/Algorithm/IpAlgBuilder.cpp

    r501 r517  
    107107      "strategy. (Only considered if \"adaptive\" is selected for option "
    108108      "\"mu_strategy\".)");
    109     roptions->SetRegisteringCategory("Initialization");
    110     roptions->AddStringOption2(
    111       "warm_start_init_point",
    112       "Warm-start for initial point", "no",
    113       "no", "do not use the warm start initialization",
    114       "yes", "use the warm start initialization",
    115       "Indicates whether this optimization should use a warm start "
    116       "initialization, where values of primal and dual variables are "
    117       "given (e.g., from a previous optimization of a related problem.)");
    118109  }
    119110
     
    190181      new PDFullSpaceSolver(*AugSolver, *pertHandler);
    191182
    192     // Create the object for initializing the iterates
    193     // Initialization object
     183    // Create the object for initializing the iterates Initialization
     184    // object.  We include both the warm start and the defaut
     185    // initializer, so that the warm start options can be activated
     186    // without having to rebuild the algorithm
    194187    SmartPtr<EqMultiplierCalculator> EqMultCalculator =
    195188      new LeastSquareMultipliers(*AugSolver);
    196     SmartPtr<IterateInitializer> IterInitializer;
    197     bool warm_start_init_point;
    198     std::string warm_start_option;
    199     options.GetStringValue("warm_start_init_point", warm_start_option, prefix);
    200     warm_start_init_point = (warm_start_option == "yes");
    201 
    202     if (warm_start_init_point) {
    203       IterInitializer = new WarmStartIterateInitializer();
    204     }
    205     else {
    206       IterInitializer = new DefaultIterateInitializer(EqMultCalculator);
    207     }
     189    SmartPtr<IterateInitializer> WarmStartInitializer =
     190      new WarmStartIterateInitializer();
     191    SmartPtr<IterateInitializer> IterInitializer =
     192      new DefaultIterateInitializer(EqMultCalculator, WarmStartInitializer);
    208193
    209194    // Solver for the restoration phase
  • branches/dev/Algorithm/IpDefaultIterateInitializer.cpp

    r510 r517  
    1616
    1717  DefaultIterateInitializer::DefaultIterateInitializer
    18   (const SmartPtr<EqMultiplierCalculator>& eq_mult_calculator)
     18  (const SmartPtr<EqMultiplierCalculator>& eq_mult_calculator,
     19   const SmartPtr<IterateInitializer>& warm_start_initializer)
    1920      :
    2021      IterateInitializer(),
    21       eq_mult_calculator_(eq_mult_calculator)
     22      eq_mult_calculator_(eq_mult_calculator),
     23      warm_start_initializer_(warm_start_initializer)
    2224  {}
    2325
     
    5658      "All dual variables corresponding to bound constraints are "
    5759      "initialized to this value.");
     60    reg_options->AddStringOption2(
     61      "warm_start_init_point",
     62      "Warm-start for initial point", "no",
     63      "no", "do not use the warm start initialization",
     64      "yes", "use the warm start initialization",
     65      "Indicates whether this optimization should use a warm start "
     66      "initialization, where values of primal and dual variables are "
     67      "given (e.g., from a previous optimization of a related problem.)");
    5868  }
    5969
     
    6474    options.GetNumericValue("bound_push", bound_push_, prefix);
    6575    options.GetNumericValue("bound_frac", bound_frac_, prefix);
    66     options.GetNumericValue("constr_mult_init_max", constr_mult_init_max_, prefix);
    67     options.GetNumericValue("bound_mult_init_val", bound_mult_init_val_, prefix);
     76    options.GetNumericValue("constr_mult_init_max",
     77                            constr_mult_init_max_, prefix);
     78    options.GetNumericValue("bound_mult_init_val",
     79                            bound_mult_init_val_, prefix);
     80    options.GetBoolValue("warm_start_init_point",
     81                         warm_start_init_point_, prefix);
    6882
    6983    bool retvalue = true;
     
    7185      retvalue = eq_mult_calculator_->Initialize(Jnlst(), IpNLP(), IpData(),
    7286                 IpCq(), options, prefix);
     87      if (!retvalue) {
     88        return retvalue;
     89      }
     90    }
     91    if (IsValid(warm_start_initializer_)) {
     92      retvalue =
     93        warm_start_initializer_->Initialize(Jnlst(), IpNLP(), IpData(),
     94                                            IpCq(), options, prefix);
    7395    }
    7496    return retvalue;
     
    79101    DBG_START_METH("DefaultIterateInitializer::SetInitialIterates",
    80102                   dbg_verbosity);
     103
     104    if (warm_start_init_point_) {
     105      DBG_ASSERT(IsValid(warm_start_initializer_));
     106      return warm_start_initializer_->SetInitialIterates();
     107    }
    81108
    82109    // Get the starting values provided by the NLP and store them
  • branches/dev/Algorithm/IpDefaultIterateInitializer.hpp

    r501 r517  
    2828    /** Constructor.  If eq_mult_calculator is not NULL, it will be
    2929     *  used to compute the initial values for equality constraint
    30      *  multipliers. */
     30     *  multipliers.  If warm_start_initializer is not NULL, it will
     31     *  be used to compute the initial values if the option
     32     *  warm_start_init_point is chosen. */
    3133    DefaultIterateInitializer
    32     (const SmartPtr<EqMultiplierCalculator>& eq_mult_calculator);
     34    (const SmartPtr<EqMultiplierCalculator>& eq_mult_calculator,
     35     const SmartPtr<IterateInitializer>& warm_start_initializer);
    3336
    3437    /** Default destructor */
     
    105108    Number bound_frac_;
    106109
    107     // Qu: Why wouldn't this go in the EqMultiplierCalculator?
    108110    /** If max-norm of the initial equality constraint multiplier
    109111     *  estimate is larger than this, the initial y_* variables are
     
    112114    /** Initial value for all bound mulitpliers. */
    113115    Number bound_mult_init_val_;
     116    /** Flag indicating whether warm_start_initializer should be used
     117     *  instead of the default initialization */
     118    bool warm_start_init_point_;
    114119    //@}
    115120
     
    117122     *  constraint multipliers. */
    118123    SmartPtr<EqMultiplierCalculator> eq_mult_calculator_;
     124
     125    /** object to be used for a warm start initialization */
     126    SmartPtr<IterateInitializer> warm_start_initializer_;
    119127  };
    120128
  • branches/dev/Algorithm/IpFilterLineSearch.cpp

    r510 r517  
    14781478  {
    14791479    DBG_START_METH("FilterLineSearch::PerformMagicStep",
    1480                    dbg_verbosity);
     1480                   2);//dbg_verbosity);
    14811481
    14821482    DBG_PRINT((1,"Incoming barr = %e and constrviol %e\n",
     
    15601560      SmartPtr<IteratesVector> trial = IpData().trial()->MakeNewContainer();
    15611561      trial->Set_s(*delta_s_magic);
     1562
     1563      // also update the set in the dual variables
     1564
     1565
    15621566      IpData().set_trial(trial);
    15631567    }
  • branches/dev/Algorithm/IpIpoptCalculatedQuantities.cpp

    r493 r517  
    192192    options.GetEnumValue("constraint_violation_norm_type", enum_int, prefix);
    193193    constr_viol_normtype_ = ENormType(enum_int);
     194    // The following option is registered by OrigIpoptNLP
     195    options.GetBoolValue("warm_start_same_structure",
     196                         warm_start_same_structure_, prefix);
     197
     198    if (!warm_start_same_structure_) {
     199      dampind_x_L_ = NULL;
     200      dampind_x_U_ = NULL;
     201      dampind_s_L_ = NULL;
     202      dampind_s_U_ = NULL;
     203
     204      tmp_x_ = NULL;
     205      tmp_s_ = NULL;
     206      tmp_c_ = NULL;
     207      tmp_d_ = NULL;
     208      tmp_x_L_ = NULL;
     209      tmp_x_U_ = NULL;
     210      tmp_s_L_ = NULL;
     211      tmp_s_U_ = NULL;
     212    }
     213
     214    num_adjusted_slack_x_L_ = 0;
     215    num_adjusted_slack_x_U_ = 0;
     216    num_adjusted_slack_s_L_ = 0;
     217    num_adjusted_slack_s_U_ = 0;
    194218
    195219    initialize_called_ = true;
     
    849873    SmartPtr<const Vector> x = ip_data_->curr()->x();
    850874
    851     std::vector<const TaggedObject*> tdeps(0);
     875    std::vector<const TaggedObject*> tdeps(2);
     876    tdeps[0] = GetRawPtr(ip_nlp_->Px_L());
     877    tdeps[1] = GetRawPtr(ip_nlp_->Px_U());
    852878    std::vector<Number> sdeps(1);
    853879    sdeps[0] = kappa_d_;
     
    937963    SmartPtr<const Vector> s = ip_data_->curr()->s();
    938964
    939     std::vector<const TaggedObject*> tdeps(0);
     965    std::vector<const TaggedObject*> tdeps(2);
     966    tdeps[0] = GetRawPtr(ip_nlp_->Pd_L());
     967    tdeps[1] = GetRawPtr(ip_nlp_->Pd_U());
    940968    std::vector<Number> sdeps(1);
    941969    sdeps[0] = kappa_d_;
     
    964992
    965993  void
    966   IpoptCalculatedQuantities::ComputeDampingIndicators(SmartPtr<const Vector>& dampind_x_L,
    967       SmartPtr<const Vector>& dampind_x_U,
    968       SmartPtr<const Vector>& dampind_s_L,
    969       SmartPtr<const Vector>& dampind_s_U)
     994  IpoptCalculatedQuantities::ComputeDampingIndicators(
     995    SmartPtr<const Vector>& dampind_x_L,
     996    SmartPtr<const Vector>& dampind_x_U,
     997    SmartPtr<const Vector>& dampind_s_L,
     998    SmartPtr<const Vector>& dampind_s_U)
    970999  {
    9711000    DBG_START_METH("IpoptCalculatedQuantities::ComputeDampingFilters()",
  • branches/dev/Algorithm/IpIpoptCalculatedQuantities.hpp

    r501 r517  
    411411    /** Norm type to be used when calculating the constraint violation */
    412412    ENormType constr_viol_normtype_;
     413    /** Flag indicating whether the TNLP with identical structure has
     414     *  already been solved before. */
     415    bool warm_start_same_structure_;
    413416    //@}
    414417
  • branches/dev/Algorithm/IpOrigIpoptNLP.cpp

    r490 r517  
    4242      jnlst_(jnlst),
    4343      nlp_(nlp),
     44      x_space_(NULL),
    4445      f_cache_(1),
    4546      grad_f_cache_(1),
     
    7374      "is set to zero, then then bounds relaxation is disabled. "
    7475      "(See Eqn.(35) in implmentation paper.)");
     76    roptions->AddStringOption2(
     77      "warm_start_same_structure",
     78      "Indicates whether a problem with a structure identical to the previous one is to be solved.",
     79      "no",
     80      "no", "Assume this is a new problem.",
     81      "yes", "Assume this is problem has known structure",
     82      "If \"yes\" is chosen, then the algorithm assumes that an NLP is now to "
     83      "be solved, whose strcture is identical to one that already was "
     84      "considered (with the same NLP object).");
    7585  }
    7686
     
    8090  {
    8191    options.GetNumericValue("bound_relax_factor", bound_relax_factor_, prefix);
     92    options.GetBoolValue("warm_start_same_structure",
     93                         warm_start_same_structure_, prefix);
     94
     95    // Reset the function evaluation counters (for warm start)
     96    f_evals_=0;
     97    grad_f_evals_=0;
     98    c_evals_=0;
     99    jac_c_evals_=0;
     100    d_evals_=0;
     101    jac_d_evals_=0;
     102    h_evals_=0;
     103
     104    // Reset the cache entries belonging to a dummy dependency.  This
     105    // is required for repeated solve, since the cache is not updated
     106    // if a dimension is zero
     107    std::vector<const TaggedObject*> deps(1);
     108    deps[0] = NULL;
     109    std::vector<Number> sdeps(0);
     110    c_cache_.InvalidateResult(deps, sdeps);
     111    d_cache_.InvalidateResult(deps, sdeps);
     112    jac_c_cache_.InvalidateResult(deps, sdeps);
     113    jac_d_cache_.InvalidateResult(deps, sdeps);
    82114
    83115    if (!nlp_->ProcessOptions(options, prefix)) {
     
    104136  {
    105137    DBG_ASSERT(initialized_);
    106 
    107     bool retValue = nlp_->GetSpaces(x_space_, c_space_, d_space_,
    108                                     x_l_space_, px_l_space_,
    109                                     x_u_space_, px_u_space_,
    110                                     d_l_space_, pd_l_space_,
    111                                     d_u_space_, pd_u_space_,
    112                                     jac_c_space_, jac_d_space_,
    113                                     h_space_);
    114 
    115     if (!retValue) {
    116       return false;
    117     }
    118 
    119     NLP_scaling()->DetermineScaling(x_space_,
    120                                     c_space_, d_space_,
    121                                     jac_c_space_, jac_d_space_,
    122                                     h_space_,
    123                                     scaled_jac_c_space_, scaled_jac_d_space_,
    124                                     scaled_h_space_);
    125 
    126     ASSERT_EXCEPTION(x_space_->Dim() >= c_space_->Dim(), TOO_FEW_DOF,
    127                      "Too few degrees of freedom!");
    128 
    129     // cannot have any null pointers, want zero length vectors
    130     // instead of null - this will later need to be changed for _h;
    131     retValue = (IsValid(x_space_) && IsValid(c_space_) && IsValid(d_space_)
    132                 && IsValid(x_l_space_) && IsValid(px_l_space_)
    133                 && IsValid(x_u_space_) && IsValid(px_u_space_)
    134                 && IsValid(d_u_space_) && IsValid(pd_u_space_)
    135                 && IsValid(d_l_space_) && IsValid(pd_l_space_)
    136                 && IsValid(jac_c_space_) && IsValid(jac_d_space_)
    137                 && IsValid(h_space_)
    138                 && IsValid(scaled_jac_c_space_) && IsValid(scaled_jac_d_space_)
    139                 && IsValid(scaled_h_space_));
    140 
    141     DBG_ASSERT(retValue && "Model cannot return null vector or matrix prototypes or spaces,"
    142                " please return zero length vectors instead");
     138    bool retValue;
     139
     140    if (!warm_start_same_structure_) {
     141
     142      retValue = nlp_->GetSpaces(x_space_, c_space_, d_space_,
     143                                 x_l_space_, px_l_space_,
     144                                 x_u_space_, px_u_space_,
     145                                 d_l_space_, pd_l_space_,
     146                                 d_u_space_, pd_u_space_,
     147                                 jac_c_space_, jac_d_space_,
     148                                 h_space_);
     149
     150      if (!retValue) {
     151        return false;
     152      }
     153
     154      NLP_scaling()->DetermineScaling(x_space_,
     155                                      c_space_, d_space_,
     156                                      jac_c_space_, jac_d_space_,
     157                                      h_space_,
     158                                      scaled_jac_c_space_, scaled_jac_d_space_,
     159                                      scaled_h_space_);
     160
     161      ASSERT_EXCEPTION(x_space_->Dim() >= c_space_->Dim(), TOO_FEW_DOF,
     162                       "Too few degrees of freedom!");
     163
     164      // cannot have any null pointers, want zero length vectors
     165      // instead of null - this will later need to be changed for _h;
     166      retValue = (IsValid(x_space_) && IsValid(c_space_) && IsValid(d_space_)
     167                  && IsValid(x_l_space_) && IsValid(px_l_space_)
     168                  && IsValid(x_u_space_) && IsValid(px_u_space_)
     169                  && IsValid(d_u_space_) && IsValid(pd_u_space_)
     170                  && IsValid(d_l_space_) && IsValid(pd_l_space_)
     171                  && IsValid(jac_c_space_) && IsValid(jac_d_space_)
     172                  && IsValid(h_space_)
     173                  && IsValid(scaled_jac_c_space_)
     174                  && IsValid(scaled_jac_d_space_)
     175                  && IsValid(scaled_h_space_));
     176
     177      DBG_ASSERT(retValue && "Model cannot return null vector or matrix prototypes or spaces,"
     178                 " please return zero length vectors instead");
     179    }
     180    else {
     181      ASSERT_EXCEPTION(IsValid(x_space_), INVALID_WARMSTART,
     182                       "OrigIpoptNLP called with warm_start_same_structure, but the problem is solved for the first time.");
     183    }
    143184
    144185    // Create the bounds structures
  • branches/dev/Algorithm/IpOrigIpoptNLP.hpp

    r501 r517  
    213213    //@}
    214214
    215     /** Methods for IpoptType */
     215    /** @name Methods for IpoptType */
    216216    //@{
    217217    /** Called by IpoptType to register the options */
    218218    static void RegisterOptions(SmartPtr<RegisteredOptions> roptions);
    219219    //@}
     220
     221    /** Accessor method to the underlying NLP */
     222    SmartPtr<NLP> nlp()
     223    {
     224      return nlp_;
     225    }
    220226
    221227  private:
     
    330336    /** relaxation factor for the bounds */
    331337    Number bound_relax_factor_;
     338    /** Flag indicating whether the TNLP with identical structure has
     339     *  already been solved before. */
     340    bool warm_start_same_structure_;
    332341    //@}
    333342
  • branches/dev/Algorithm/IpOrigIterationOutput.cpp

    r501 r517  
    3535  {
    3636    std::string prev_cat = roptions->RegisteringCategory();
    37     roptions->SetRegisteringCategory("Undocumented");
     37    roptions->SetRegisteringCategory("Output");
    3838    roptions->AddStringOption2(
    3939      "print_info_string",
  • branches/dev/Algorithm/IpPDPerturbationHandler.cpp

    r501 r517  
    178178      bool retval = get_deltas_for_wrong_inertia(delta_x, delta_s,
    179179                    delta_c, delta_d);
     180      Jnlst().Printf(J_ERROR, J_MAIN, "Cannot determine appropriate modification of Hessian matrix.\nThis can happen if there are NaN or Inf numbers in the user-provided Hessian.\n");
    180181      ASSERT_EXCEPTION(retval, INTERNAL_ABORT,
    181182                       "get_deltas_for_wrong_inertia returns false.");
  • branches/dev/Algorithm/IpStdAugSystemSolver.cpp

    r501 r517  
    4747      d_d_tag_(0),
    4848      delta_d_(0.),
    49       augsys_tag_(0),
    50       augmented_system_(NULL),
    5149      old_w_(NULL)
    5250  {
     
    6462                                          const std::string& prefix)
    6563  {
     64    // This option is registered by OrigIpoptNLP
     65    options.GetBoolValue("warm_start_same_structure",
     66                         warm_start_same_structure_, prefix);
     67
     68    if (!warm_start_same_structure_) {
     69      augsys_tag_ = 0;
     70      augmented_system_ = NULL;
     71    }
     72    else {
     73      ASSERT_EXCEPTION(IsValid(augmented_system_), INVALID_WARMSTART,
     74                       "StdAugSystemSolver called with warm_start_same_structure, but augmented system is not initialized.");
     75    }
     76
    6677    return linsolver_->Initialize(Jnlst(), IpNLP(), IpData(), IpCq(),
    6778                                  options, prefix);
  • branches/dev/Algorithm/IpStdAugSystemSolver.hpp

    r501 r517  
    234234     *  the nonzero structure of the augmented_system_ consistent */
    235235    SmartPtr<const SymMatrix> old_w_;
     236
     237    /** @name Algorithmic parameters */
     238    //@{
     239    /** Flag indicating whether the TNLP with identical structure has
     240     *  already been solved before. */
     241    bool warm_start_same_structure_;
     242    //@}
    236243  };
    237244
  • branches/dev/Algorithm/LinearSolvers/IpMa27TSolverInterface.cpp

    r501 r517  
    115115    if(options.GetNumericValue("pivtolmax", pivtolmax_, prefix)) {
    116116      ASSERT_EXCEPTION(pivtolmax_>=pivtol_, OPTION_INVALID,
    117                        "Option \"pivtolmax\": This value must be between pivtol and 1.");
     117                       "Option \"pivtolmax\": This value must be between "
     118                       "pivtol and 1.");
    118119    }
    119120    else {
     
    124125    options.GetNumericValue("la_init_factor", la_init_factor_, prefix);
    125126    options.GetNumericValue("meminc_factor", meminc_factor_, prefix);
     127    // The following option is registered by OrigIpoptNLP
     128    options.GetBoolValue("warm_start_same_structure",
     129                         warm_start_same_structure_, prefix);
    126130
    127131    /* Set the default options for MA27 */
     
    134138
    135139    // Reset all private data
    136     dim_=0;
    137     nonzeros_=0;
    138140    initialized_=false;
    139141    pivtol_changed_ = false;
     
    142144    la_increase_=false;
    143145    liw_increase_=false;
     146
     147    if (!warm_start_same_structure_) {
     148      dim_=0;
     149      nonzeros_=0;
     150    }
     151    else {
     152      ASSERT_EXCEPTION(dim_>0 && nonzeros_>0, INVALID_WARMSTART,
     153                       "Ma27TSolverInterface called with warm_start_same_structure, but the problem is solved for the first time.");
     154    }
    144155
    145156    return true;
     
    213224  {
    214225    DBG_START_METH("Ma27TSolverInterface::InitializeStructure",dbg_verbosity);
    215     dim_ = dim;
    216     nonzeros_ = nonzeros;
    217 
    218     // Do the symbolic facotrization
    219     ESymSolverStatus retval = SymbolicFactorization(airn, ajcn);
    220     if (retval != SYMSOLVER_SUCCESS ) {
    221       return retval;
     226
     227    ESymSolverStatus retval = SYMSOLVER_SUCCESS;
     228    if (!warm_start_same_structure_) {
     229      dim_ = dim;
     230      nonzeros_ = nonzeros;
     231
     232      // Do the symbolic facotrization
     233      retval = SymbolicFactorization(airn, ajcn);
     234      if (retval != SYMSOLVER_SUCCESS ) {
     235        return retval;
     236      }
     237    }
     238    else {
     239      ASSERT_EXCEPTION(dim_==dim && nonzeros_==nonzeros, INVALID_WARMSTART,
     240                       "Ma27TSolverInterface called with warm_start_same_structure, but the problem size has changed.");
    222241    }
    223242
  • branches/dev/Algorithm/LinearSolvers/IpMa27TSolverInterface.hpp

    r501 r517  
    1 // Copyright (C) 2004, International Business Machines and others.
     1// Copyright (C) 2004, 2005 International Business Machines and others.
    22// All Rights Reserved.
    33// This code is published under the Common Public License.
     
    161161    /** Factor for increaseing memory */
    162162    Number meminc_factor_;
     163    /** Flag indicating whether the TNLP with identical structure has
     164     *  already been solved before. */
     165    bool warm_start_same_structure_;
    163166    //@}
    164167
  • branches/dev/Algorithm/LinearSolvers/IpSparseSymLinearSolverInterface.hpp

    r501 r517  
    4848   *  numbering).
    4949   *
    50    *  3. After this, the InitializeStructure method is call (once).
     50   *  3. After this, the InitializeStructure method is called (once).
    5151   *  Here, the structure of the matrix is provided.  If the linear
    5252   *  solver requires a symbolic preprocessing phase that can be done
     
    8787   *  Note, if the matrix is given in triplet format, entries might be
    8888   *  listed multiple times, in which case the corresponsing elements
    89    *  have to be added.  */
     89   *  have to be added.
     90   *
     91   *  A note for warm starts: If the option
     92   *  "warm_start_same_structure" is specified with "yes", the
     93   *  algorithm assumes that a problem with the same sparsity
     94   *  structure is solved for a repeated time.  In that case, the
     95   *  linear solver might reuse information from the previous
     96   *  optimization.  See Ma27TSolverInterface for an example.
     97  */
    9098  class SparseSymLinearSolverInterface: public AlgorithmStrategyObject
    9199  {
  • branches/dev/Algorithm/LinearSolvers/IpTSymLinearSolver.cpp

    r501 r517  
    3535      nonzeros_triplet_(0),
    3636      nonzeros_compressed_(0),
     37      have_structure_(false),
    3738      initialized_(false),
    3839
     
    5960                                        const std::string& prefix)
    6061  {
    61     // Reset all private data
    62     atag_=0;
    63     dim_=0;
    64     nonzeros_triplet_=0;
    65     nonzeros_compressed_=0;
    66     initialized_=false;
     62    // This option is registered by OrigIpoptNLP
     63    options.GetBoolValue("warm_start_same_structure",
     64                         warm_start_same_structure_, prefix);
    6765
    6866    if (!solver_interface_->Initialize(Jnlst(), IpNLP(), IpData(), IpCq(),
     
    7169    }
    7270
    73     matrix_format_ = solver_interface_->MatrixFormat();
    74     switch (matrix_format_) {
    75       case SparseSymLinearSolverInterface::CSR_Format_0_Offset:
    76       triplet_to_csr_converter_ = new TripletToCSRConverter(0);
    77       break;
    78       case SparseSymLinearSolverInterface::CSR_Format_1_Offset:
    79       triplet_to_csr_converter_ = new TripletToCSRConverter(1);
    80       break;
    81       case SparseSymLinearSolverInterface::Triplet_Format:
    82       triplet_to_csr_converter_ = NULL;
    83       break;
    84       default:
    85       DBG_ASSERT(false && "Invalid MatrixFormat returned from solver interface.");
    86       return false;
    87     }
     71    if (!warm_start_same_structure_) {
     72      // Reset all private data
     73      atag_=0;
     74      dim_=0;
     75      nonzeros_triplet_=0;
     76      nonzeros_compressed_=0;
     77      have_structure_=false;
     78
     79      matrix_format_ = solver_interface_->MatrixFormat();
     80      switch (matrix_format_) {
     81        case SparseSymLinearSolverInterface::CSR_Format_0_Offset:
     82        triplet_to_csr_converter_ = new TripletToCSRConverter(0);
     83        break;
     84        case SparseSymLinearSolverInterface::CSR_Format_1_Offset:
     85        triplet_to_csr_converter_ = new TripletToCSRConverter(1);
     86        break;
     87        case SparseSymLinearSolverInterface::Triplet_Format:
     88        triplet_to_csr_converter_ = NULL;
     89        break;
     90        default:
     91        DBG_ASSERT(false && "Invalid MatrixFormat returned from solver interface.");
     92        return false;
     93      }
     94    }
     95    else {
     96      ASSERT_EXCEPTION(have_structure_, INVALID_WARMSTART,
     97                       "TSymLinearSolver called with warm_start_same_structure, but the internal structures are not initialized.");
     98    }
     99
     100    // reset the initialize flag to make sure that InitializeStructure
     101    // is called for the linear solver
     102    initialized_=false;
    88103
    89104    bool retval = true;
     
    202217    DBG_ASSERT(!initialized_);
    203218
    204     dim_ = sym_A.Dim();
    205     nonzeros_triplet_ = TripletHelper::GetNumberEntries(sym_A);
    206 
    207     delete [] airn_;
    208     delete [] ajcn_;
    209     airn_ = new Index[nonzeros_triplet_];
    210     ajcn_ = new Index[nonzeros_triplet_];
    211 
    212     TripletHelper::FillRowCol(nonzeros_triplet_, sym_A, airn_, ajcn_);
    213 
    214     // If the solver wants the compressed format, the converter has to
    215     // be initialized
    216     const Index *ia;
    217     const Index *ja;
    218     Index nonzeros;
    219     if (matrix_format_ == SparseSymLinearSolverInterface::Triplet_Format) {
    220       ia = airn_;
    221       ja = ajcn_;
    222       nonzeros = nonzeros_triplet_;
     219    ESymSolverStatus retval;
     220
     221    // have_structure_ is already true if this is a warm start for a
     222    // problem with identical structure
     223    if (!have_structure_) {
     224
     225      dim_ = sym_A.Dim();
     226      nonzeros_triplet_ = TripletHelper::GetNumberEntries(sym_A);
     227
     228      delete [] airn_;
     229      delete [] ajcn_;
     230      airn_ = new Index[nonzeros_triplet_];
     231      ajcn_ = new Index[nonzeros_triplet_];
     232
     233      TripletHelper::FillRowCol(nonzeros_triplet_, sym_A, airn_, ajcn_);
     234
     235      // If the solver wants the compressed format, the converter has to
     236      // be initialized
     237      const Index *ia;
     238      const Index *ja;
     239      Index nonzeros;
     240      if (matrix_format_ == SparseSymLinearSolverInterface::Triplet_Format) {
     241        ia = airn_;
     242        ja = ajcn_;
     243        nonzeros = nonzeros_triplet_;
     244      }
     245      else {
     246        nonzeros_compressed_ =
     247          triplet_to_csr_converter_->InitializeConverter(dim_, nonzeros_triplet_,
     248              airn_, ajcn_);
     249        ia = triplet_to_csr_converter_->IA();
     250        ja = triplet_to_csr_converter_->JA();
     251        nonzeros = nonzeros_compressed_;
     252      }
     253
     254      retval = solver_interface_->InitializeStructure(dim_, nonzeros, ia, ja);
     255      if (retval != SYMSOLVER_SUCCESS) {
     256        return retval;
     257      }
     258
     259      // Get space for the scaling factors
     260      delete [] scaling_factors_;
     261      if (IsValid(scaling_method_)) {
     262        scaling_factors_ = new double[dim_];
     263      }
     264
     265      have_structure_ = true;
    223266    }
    224267    else {
    225       nonzeros_compressed_ =
    226         triplet_to_csr_converter_->InitializeConverter(dim_, nonzeros_triplet_,
    227             airn_, ajcn_);
    228       ia = triplet_to_csr_converter_->IA();
    229       ja = triplet_to_csr_converter_->JA();
    230       nonzeros = nonzeros_compressed_;
    231     }
    232 
    233     ESymSolverStatus retval =
    234       solver_interface_->InitializeStructure(dim_, nonzeros, ia, ja);
    235     if (retval != SYMSOLVER_SUCCESS) {
    236       return retval;
    237     }
    238 
    239     // Get space for the scaling factors
    240     delete [] scaling_factors_;
    241     if (IsValid(scaling_method_)) {
    242       scaling_factors_ = new double[dim_];
    243     }
    244 
    245     initialized_ = true;
     268      ASSERT_EXCEPTION(dim_==sym_A.Dim(), INVALID_WARMSTART,
     269                       "TSymLinearSolver called with warm_start_same_structure, but the problem is solved for the first time.");
     270      // This is a warm start for identical structure, so we don't need to
     271      // recompute the nonzeros location arrays
     272      const Index *ia;
     273      const Index *ja;
     274      Index nonzeros;
     275      if (matrix_format_ == SparseSymLinearSolverInterface::Triplet_Format) {
     276        ia = airn_;
     277        ja = ajcn_;
     278        nonzeros = nonzeros_triplet_;
     279      }
     280      else {
     281        ia = triplet_to_csr_converter_->IA();
     282        ja = triplet_to_csr_converter_->JA();
     283        nonzeros = nonzeros_compressed_;
     284      }
     285      retval = solver_interface_->InitializeStructure(dim_, nonzeros, ia, ja);
     286    }
     287    initialized_=true;
    246288    return retval;
    247289  }
  • branches/dev/Algorithm/LinearSolvers/IpTSymLinearSolver.hpp

    r501 r517  
    122122    /** @name Initialization flags */
    123123    //@{
    124     /** Flag indicating if internal data is initialized.
     124    /** Flag indicating if the internal structures are initialized.
    125125     *  For initialization, this object needs to have seen a matrix */
     126    bool have_structure_;
     127    /** Flag indicating if the InitializeStructure method has been
     128     *  called for the linear solver. */
    126129    bool initialized_;
    127130    //@}
     
    151154    //@}
    152155
     156    /** @name Algorithmic parameters */
     157    //@{
     158    /** Flag indicating whether the TNLP with identical structure has
     159     *  already been solved before. */
     160    bool warm_start_same_structure_;
     161    //@}
     162
    153163    /** @name Internal functions */
    154164    //@{
  • branches/dev/Common/IpCachedResults.hpp

    r501 r517  
    157157                             const TaggedObject* dependent3);
    158158
    159     // CARL: What do you think about the following?  I added this at
    160     // some point because then now GetRawPtr was necessary
    161159    /** @name Pointer-free version of the Add and Get methods */
    162160    //@{
     
    197195    }
    198196    //@}
     197
     198    /** Invalidates the result for given dependencies. Sets the stale
     199     *  flag for the corresponding cached result to true if it is
     200     *  found.  Returns true, if the result was found. */
     201    bool InvalidateResult(const std::vector<const TaggedObject*>& dependents,
     202                          const std::vector<Number>& scalar_dependents);
    199203
    200204  private:
     
    263267    bool IsStale() const;
    264268
     269    /** Invalidates the cached result. */
     270    void Invalidate();
     271
    265272    /** Returns the cached result. */
    266273    const T& GetResult() const;
     
    379386  }
    380387
     388  template <class T>
     389  void DependentResult<T>::Invalidate()
     390  {
     391    stale_ = true;
     392  }
    381393
    382394  template <class T>
     
    659671
    660672    return GetCachedResult(retResult, dependents);
     673  }
     674
     675  template <class T>
     676  bool CachedResults<T>::InvalidateResult(const std::vector<const TaggedObject*>& dependents,
     677                                          const std::vector<Number>& scalar_dependents)
     678  {
     679    if (!cached_results_)
     680      return false;
     681
     682    CleanupInvalidatedResults();
     683
     684    bool retValue = false;
     685    typename std::list< DependentResult<T>* >::const_iterator iter;
     686    for (iter = cached_results_->begin(); iter != cached_results_->end(); iter++) {
     687      if ((*iter)->DependentsIdentical(dependents, scalar_dependents)) {
     688        (*iter)->Invalidate();
     689        retValue = true;
     690        break;
     691      }
     692    }
     693
     694    return retValue;
    661695  }
    662696
  • branches/dev/Interfaces/IpAlgTypes.hpp

    r501 r517  
    3838  DECLARE_STD_EXCEPTION(ACCEPTABLE_POINT_REACHED);
    3939  DECLARE_STD_EXCEPTION(FEASIBILITY_PROBLEM_SOLVED);
     40  DECLARE_STD_EXCEPTION(INVALID_WARMSTART);
    4041  DECLARE_STD_EXCEPTION(INTERNAL_ABORT);
    4142  /** Exception FAILED_INITIALIZATION for problem during
  • branches/dev/Interfaces/IpIpoptApplication.cpp

    r516 r517  
    3232      jnlst_(new Journalist()),
    3333      options_(new OptionsList()),
    34       statistics_(NULL)
     34      statistics_(NULL),
     35      alg_(NULL),
     36      nlp_adapter_(NULL)
    3537  {
    3638    try {
     
    156158          std::list<std::string> categories;
    157159          categories.push_back("Output");
    158           categories.push_back("Main Algorithm");
     160          /*categories.push_back("Main Algorithm");*/
    159161          categories.push_back("Convergence");
    160162          categories.push_back("NLP Scaling");
     
    269271  }
    270272
    271   ApplicationReturnStatus IpoptApplication::OptimizeTNLP(const SmartPtr<TNLP>& nlp)
    272   {
    273     SmartPtr<NLP> nlp_adapter =
    274       new TNLPAdapter(GetRawPtr(nlp), ConstPtr(jnlst_));
    275 
    276     return OptimizeNLP(nlp_adapter);
     273  ApplicationReturnStatus
     274  IpoptApplication::OptimizeTNLP(const SmartPtr<TNLP>& tnlp)
     275  {
     276    nlp_adapter_ = new TNLPAdapter(GetRawPtr(tnlp), ConstPtr(jnlst_));
     277
     278    return OptimizeNLP(nlp_adapter_);
     279  }
     280
     281  ApplicationReturnStatus
     282  IpoptApplication::ReOptimizeTNLP(const SmartPtr<TNLP>& tnlp)
     283  {
     284    ASSERT_EXCEPTION(IsValid(nlp_adapter_), INVALID_WARMSTART,
     285                     "ReOptimizeTNLP called before OptimizeTNLP.");
     286    TNLPAdapter* adapter =
     287      dynamic_cast<TNLPAdapter*> (GetRawPtr(nlp_adapter_));
     288    DBG_ASSERT(adapter);
     289    ASSERT_EXCEPTION(adapter->tnlp()==tnlp, INVALID_WARMSTART,
     290                     "ReOptimizeTNLP called for different TNLP.")
     291
     292    return ReOptimizeNLP(nlp_adapter_);
    277293  }
    278294
    279295  ApplicationReturnStatus
    280296  IpoptApplication::OptimizeNLP(const SmartPtr<NLP>& nlp)
     297  {
     298    ApplicationReturnStatus retValue = Internal_Error;
     299
     300    // Prepare internal data structures of the algorithm
     301    try {
     302
     303      SmartPtr<NLPScalingObject> nlp_scaling;
     304      std::string nlp_scaling_method;
     305      options_->GetStringValue("nlp_scaling_method", nlp_scaling_method, "");
     306      if (nlp_scaling_method == "user_scaling") {
     307        nlp_scaling = new UserScaling(ConstPtr(nlp));
     308      }
     309      else if (nlp_scaling_method == "gradient_based") {
     310        nlp_scaling = new GradientScaling(nlp);
     311      }
     312      else {
     313        nlp_scaling = new NoNLPScalingObject();
     314      }
     315
     316      ip_nlp_ =  new OrigIpoptNLP(ConstPtr(jnlst_), GetRawPtr(nlp), nlp_scaling);
     317
     318      // Create the IpoptData
     319      ip_data_ = new IpoptData();
     320
     321      // Create the IpoptCalculators
     322      ip_cq_ = new IpoptCalculatedQuantities(ip_nlp_, ip_data_);
     323
     324      // Create the Algorithm object
     325      alg_ = AlgorithmBuilder::BuildBasicAlgorithm(*jnlst_, *options_, "");
     326
     327      // finally call the optimization
     328      retValue = call_optimize();
     329    }
     330    catch(TOO_FEW_DOF& exc) {
     331      //exc.ReportException(*jnlst_);
     332      jnlst_->Printf(J_SUMMARY, J_MAIN, "\nEXIT: Problem has too few degrees of freedom.\n");
     333      retValue = Not_Enough_Degrees_Of_Freedom;
     334    }
     335    catch(OPTION_INVALID& exc) {
     336      exc.ReportException(*jnlst_);
     337      retValue = Invalid_Option;
     338    }
     339    catch(IpoptException& exc) {
     340      exc.ReportException(*jnlst_);
     341      retValue = Unrecoverable_Exception;
     342    }
     343    catch(std::bad_alloc& exc) {
     344      retValue = Insufficient_Memory;
     345      jnlst_->Printf(J_SUMMARY, J_MAIN, "\nEXIT: Not enough memory.\n");
     346    }
     347    catch(...) {
     348      IpoptException exc("Unknown Exception caught in Ipopt", "Unknown File", -1);
     349      exc.ReportException(*jnlst_);
     350      retValue = NonIpopt_Exception_Thrown;
     351    }
     352
     353    jnlst_->FlushBuffer();
     354
     355    return retValue;
     356  }
     357
     358  ApplicationReturnStatus
     359  IpoptApplication::ReOptimizeNLP(const SmartPtr<NLP>& nlp)
     360  {
     361    ASSERT_EXCEPTION(IsValid(alg_), INVALID_WARMSTART,
     362                     "ReOptimizeNLP called before OptimizeNLP.");
     363    OrigIpoptNLP* orig_nlp =
     364      dynamic_cast<OrigIpoptNLP*> (GetRawPtr(ip_nlp_));
     365    DBG_ASSERT(orig_nlp);
     366    ASSERT_EXCEPTION(orig_nlp->nlp()==nlp, INVALID_WARMSTART,
     367                     "ReOptimizeTNLP called for different NLP.")
     368
     369    return call_optimize();
     370  }
     371
     372
     373  ApplicationReturnStatus IpoptApplication::call_optimize()
    281374  {
    282375    // Reset the print-level for the screen output
     
    292385    statistics_ = NULL; /* delete old statistics */
    293386    ApplicationReturnStatus retValue = Internal_Error;
    294     SmartPtr<IpoptData> ip_data;
    295     SmartPtr<IpoptCalculatedQuantities> ip_cq;
    296     SmartPtr<IpoptNLP> ip_nlp;
    297387    try {
    298 
    299       SmartPtr<NLPScalingObject> nlp_scaling;
    300       std::string nlp_scaling_method;
    301       options_->GetStringValue("nlp_scaling_method", nlp_scaling_method, "");
    302       if (nlp_scaling_method == "user_scaling") {
    303         nlp_scaling = new UserScaling(ConstPtr(nlp));
    304       }
    305       else if (nlp_scaling_method == "gradient_based") {
    306         nlp_scaling = new GradientScaling(nlp);
    307       }
    308       else {
    309         nlp_scaling = new NoNLPScalingObject();
    310       }
    311 
    312       ip_nlp =  new OrigIpoptNLP(ConstPtr(jnlst_), GetRawPtr(nlp), nlp_scaling);
    313 
    314       // Create the IpoptData
    315       if (IsNull(ip_data)) {
    316         ip_data = new IpoptData();
    317       }
    318 
    319       // Create the IpoptCalculators
    320       if (IsNull(ip_cq)) {
    321         ip_cq = new IpoptCalculatedQuantities(ip_nlp, ip_data);
    322       }
    323 
    324       // Create the Algorithm object
    325       SmartPtr<IpoptAlgorithm> alg
    326       = AlgorithmBuilder::BuildBasicAlgorithm(*jnlst_, *options_, "");
    327 
    328388      // Set up the algorithm
    329       alg->Initialize(*jnlst_, *ip_nlp, *ip_data, *ip_cq, *options_, "");
     389      alg_->Initialize(*jnlst_, *ip_nlp_, *ip_data_, *ip_cq_, *options_, "");
    330390
    331391      if( jnlst_->ProduceOutput(J_DETAILED, J_MAIN) ) {
     
    337397
    338398      // Run the algorithm
    339       SolverReturn status = alg->Optimize();
     399      SolverReturn status = alg_->Optimize();
    340400
    341401      jnlst_->Printf(J_SUMMARY, J_SOLUTION,
    342402                     "\nNumber of Iterations....: %d\n",
    343                      ip_data->iter_count());
     403                     ip_data_->iter_count());
    344404
    345405      jnlst_->Printf(J_SUMMARY, J_SOLUTION,
    346406                     "\n                                   (scaled)                 (unscaled)\n");
    347407      jnlst_->Printf(J_SUMMARY, J_SOLUTION,
    348                      "Objective...............: %24.16e  %24.16e\n", ip_cq->curr_f(), ip_cq->unscaled_curr_f());
    349       jnlst_->Printf(J_SUMMARY, J_SOLUTION,
    350                      "Dual infeasibility......: %24.16e  %24.16e\n", ip_cq->curr_dual_infeasibility(NORM_MAX), ip_cq->unscaled_curr_dual_infeasibility(NORM_MAX));
    351       jnlst_->Printf(J_SUMMARY, J_SOLUTION,
    352                      "Constraint violation....: %24.16e  %24.16e\n", ip_cq->curr_nlp_constraint_violation(NORM_MAX), ip_cq->unscaled_curr_nlp_constraint_violation(NORM_MAX));
    353       jnlst_->Printf(J_SUMMARY, J_SOLUTION,
    354                      "Complementarity.........: %24.16e  %24.16e\n", ip_cq->curr_complementarity(0., NORM_MAX), ip_cq->unscaled_curr_complementarity(0., NORM_MAX));
    355       jnlst_->Printf(J_SUMMARY, J_SOLUTION,
    356                      "Overall NLP error.......: %24.16e  %24.16e\n\n", ip_cq->curr_nlp_error(), ip_cq->unscaled_curr_nlp_error());
    357 
    358       ip_data->curr()->x()->Print(*jnlst_, J_VECTOR, J_SOLUTION, "x");
    359       ip_data->curr()->y_c()->Print(*jnlst_, J_VECTOR, J_SOLUTION, "y_c");
    360       ip_data->curr()->y_d()->Print(*jnlst_, J_VECTOR, J_SOLUTION, "y_d");
    361       ip_data->curr()->z_L()->Print(*jnlst_, J_VECTOR, J_SOLUTION, "z_L");
    362       ip_data->curr()->z_U()->Print(*jnlst_, J_VECTOR, J_SOLUTION, "z_U");
    363       ip_data->curr()->v_L()->Print(*jnlst_, J_VECTOR, J_SOLUTION, "v_L");
    364       ip_data->curr()->v_U()->Print(*jnlst_, J_VECTOR, J_SOLUTION, "v_U");
     408                     "Objective...............: %24.16e  %24.16e\n",
     409                     ip_cq_->curr_f(),
     410                     ip_cq_->unscaled_curr_f());
     411      jnlst_->Printf(J_SUMMARY, J_SOLUTION,
     412                     "Dual infeasibility......: %24.16e  %24.16e\n",
     413                     ip_cq_->curr_dual_infeasibility(NORM_MAX),
     414                     ip_cq_->unscaled_curr_dual_infeasibility(NORM_MAX));
     415      jnlst_->Printf(J_SUMMARY, J_SOLUTION,
     416                     "Constraint violation....: %24.16e  %24.16e\n",
     417                     ip_cq_->curr_nlp_constraint_violation(NORM_MAX),
     418                     ip_cq_->unscaled_curr_nlp_constraint_violation(NORM_MAX));
     419      jnlst_->Printf(J_SUMMARY, J_SOLUTION,
     420                     "Complementarity.........: %24.16e  %24.16e\n",
     421                     ip_cq_->curr_complementarity(0., NORM_MAX),
     422                     ip_cq_->unscaled_curr_complementarity(0., NORM_MAX));
     423      jnlst_->Printf(J_SUMMARY, J_SOLUTION,
     424                     "Overall NLP error.......: %24.16e  %24.16e\n\n",
     425                     ip_cq_->curr_nlp_error(),
     426                     ip_cq_->unscaled_curr_nlp_error());
     427
     428      ip_data_->curr()->x()->Print(*jnlst_, J_VECTOR, J_SOLUTION, "x");
     429      ip_data_->curr()->y_c()->Print(*jnlst_, J_VECTOR, J_SOLUTION, "y_c");
     430      ip_data_->curr()->y_d()->Print(*jnlst_, J_VECTOR, J_SOLUTION, "y_d");
     431      ip_data_->curr()->z_L()->Print(*jnlst_, J_VECTOR, J_SOLUTION, "z_L");
     432      ip_data_->curr()->z_U()->Print(*jnlst_, J_VECTOR, J_SOLUTION, "z_U");
     433      ip_data_->curr()->v_L()->Print(*jnlst_, J_VECTOR, J_SOLUTION, "v_L");
     434      ip_data_->curr()->v_U()->Print(*jnlst_, J_VECTOR, J_SOLUTION, "v_U");
    365435
    366436      jnlst_->Printf(J_SUMMARY, J_STATISTICS,
    367437                     "\nNumber of objective function evaluations             = %d\n",
    368                      ip_nlp->f_evals());
     438                     ip_nlp_->f_evals());
    369439      jnlst_->Printf(J_SUMMARY, J_STATISTICS,
    370440                     "Number of objective gradient evaluations             = %d\n",
    371                      ip_nlp->grad_f_evals());
     441                     ip_nlp_->grad_f_evals());
    372442      jnlst_->Printf(J_SUMMARY, J_STATISTICS,
    373443                     "Number of equality constraint evaluations            = %d\n",
    374                      ip_nlp->c_evals());
     444                     ip_nlp_->c_evals());
    375445      jnlst_->Printf(J_SUMMARY, J_STATISTICS,
    376446                     "Number of inequality constraint evaluations          = %d\n",
    377                      ip_nlp->d_evals());
     447                     ip_nlp_->d_evals());
    378448      jnlst_->Printf(J_SUMMARY, J_STATISTICS,
    379449                     "Number of equality constraint Jacobian evaluations   = %d\n",
    380                      ip_nlp->jac_c_evals());
     450                     ip_nlp_->jac_c_evals());
    381451      jnlst_->Printf(J_SUMMARY, J_STATISTICS,
    382452                     "Number of inequality constraint Jacobian evaluations = %d\n",
    383                      ip_nlp->jac_d_evals());
     453                     ip_nlp_->jac_d_evals());
    384454      jnlst_->Printf(J_SUMMARY, J_STATISTICS,
    385455                     "Number of Lagrangian Hessian evaluations             = %d\n",
    386                      ip_nlp->h_evals());
     456                     ip_nlp_->h_evals());
    387457
    388458      // Write EXIT message
     
    416486        return retValue;
    417487      }
    418       ip_nlp->FinalizeSolution(status,
    419                                *ip_data->curr()->x(), *ip_data->curr()->z_L(), *ip_data->curr()->z_U(),
    420                                *ip_cq->curr_c(), *ip_cq->curr_d(), *ip_data->curr()->y_c(), *ip_data->curr()->y_d(),
    421                                ip_cq->curr_f());
     488      ip_nlp_->FinalizeSolution(status,
     489                                *ip_data_->curr()->x(),
     490                                *ip_data_->curr()->z_L(),
     491                                *ip_data_->curr()->z_U(),
     492                                *ip_cq_->curr_c(), *ip_cq_->curr_d(),
     493                                *ip_data_->curr()->y_c(),
     494                                *ip_data_->curr()->y_d(),
     495                                ip_cq_->curr_f());
    422496      // Create a SolveStatistics object
    423       statistics_ = new SolveStatistics(ip_nlp, ip_data, ip_cq);
     497      statistics_ = new SolveStatistics(ip_nlp_, ip_data_, ip_cq_);
    424498    }
    425499    catch(TOO_FEW_DOF& exc) {
     
    449523
    450524    return retValue;
    451 
    452525  }
    453526
  • branches/dev/Interfaces/IpIpoptApplication.hpp

    r516 r517  
    1818namespace Ipopt
    1919{
    20   /* Forward declaration */
     20  /* Forward declarations */
    2121  class NLP;
     22  class IpoptAlgorithm;
    2223
    2324  /* Return codes for the Optimize call for an application */
     
    3536    //@{
    3637    /** Solve a problem that inherits from TNLP */
    37     ApplicationReturnStatus OptimizeTNLP(const SmartPtr<TNLP>& nlp);
     38    ApplicationReturnStatus OptimizeTNLP(const SmartPtr<TNLP>& tnlp);
    3839
    3940    /** Solve a problem that inherits from NLP */
    4041    ApplicationReturnStatus OptimizeNLP(const SmartPtr<NLP>& nlp);
     42
     43    /** Solve a problem (that inherits from TNLP) for a repeated time.
     44     *  The OptimizeTNLP method must have been called before.  The
     45     *  TNLP must be the same object, and the structure (number of
     46     *  variables and constraints and position of nonzeros in Jacobian
      47     *  and Hessian) must be the same. */
     48    ApplicationReturnStatus ReOptimizeTNLP(const SmartPtr<TNLP>& tnlp);
     49
     50    /** Solve a problem (that inherits from NLP) for a repeated time.
     51     *  The OptimizeNLP method must have been called before.  The
     52     *  NLP must be the same object, and the structure (number of
     53     *  variables and constraints and position of nonzeros in Jacobian
      54     *  and Hessian) must be the same. */
     55    ApplicationReturnStatus ReOptimizeNLP(const SmartPtr<NLP>& nlp);
    4156    //@}
    4257
     
    94109    void RegisterAllOptions(const SmartPtr<RegisteredOptions>& roptions);
    95110
     111    /** Method for the actual optimize call of the Ipopt algorithm.
     112     *  This is used both for Optimize and ReOptimize */
     113    ApplicationReturnStatus call_optimize();
     114
    96115    /**@name Variables that customize the application behavior */
    97116    //@{
     
    110129    SmartPtr<SolveStatistics> statistics_;
    111130
      131    /** Object with the algorithm skeleton */
     132    SmartPtr<IpoptAlgorithm> alg_;
     133
     134    /** IpoptNLP Object for the NLP.  We keep this around for a
     135     *  ReOptimize warm start */
     136    SmartPtr<IpoptNLP> ip_nlp_;
     137
     138    /** IpoptData Object for the NLP.  We keep this around for a
     139     *  ReOptimize warm start */
     140    SmartPtr<IpoptData> ip_data_;
     141
     142    /** IpoptCalculatedQuantities Object for the NLP.  We keep this
     143     *  around for a ReOptimize warm start */
     144    SmartPtr<IpoptCalculatedQuantities> ip_cq_;
     145
    112146    /** Pointer for journals (which are also in the Journalist) so
    113147     *  that we can reset the printlevels if necessary before each
    114148     *  optimization. */
    115149    SmartPtr<Journal> stdout_jrnl_;
     150
     151    /** Pointer to the TNLPAdapter used to convert the TNLP to an NLP.
     152     *  We keep this around for the ReOptimizeTNLP call. */
     153    SmartPtr<NLP> nlp_adapter_;
    116154  };
    117155
  • branches/dev/Interfaces/IpTNLPAdapter.cpp

    r515 r517  
    4949      x_tag_for_jac_g_(0),
    5050      jac_idx_map_(NULL),
    51       h_idx_map_(NULL)
     51      h_idx_map_(NULL),
     52      x_fixed_map_(NULL)
    5253  {
    5354    ASSERT_EXCEPTION(IsValid(tnlp_), INVALID_TNLP,
     
    5859  {
    5960    delete [] full_x_;
    60     full_x_ = NULL;
    6161    delete [] full_lambda_;
    62     full_lambda_ = NULL;
    6362    delete [] full_g_;
    64     full_g_ = NULL;
    6563    delete [] jac_g_;
    66     jac_g_ = NULL;
    6764    delete [] c_rhs_;
    68     c_rhs_ = NULL;
    6965    delete [] jac_idx_map_;
    70     jac_idx_map_ = NULL;
    7166    delete [] h_idx_map_;
    72     h_idx_map_ = NULL;
     67    delete [] x_fixed_map_;
    7368  }
    7469
     
    115110      "yes", "Print all derivatives",
    116111      "Determines erbosity of derivative checker.");
    117     roptions->SetRegisteringCategory("Uncategorized");
    118     roptions->AddLowerBoundedNumberOption("max_onesided_bound_slack", "???",
    119                                           0.0, false, 0.0);
    120112  }
    121113
     
    131123                     "Option \"nlp_lower_bound_inf\" must be smaller than \"nlp_upper_bound_inf\".");
    132124
    133     options.GetNumericValue("max_onesided_bound_slack", max_onesided_bound_slack_, prefix);
    134 
    135125    Index enum_int;
    136126    options.GetEnumValue("derivative_test", enum_int, prefix);
    137127    derivative_test_ = DerivativeTestEnum(enum_int);
    138     options.GetNumericValue("derivative_test_perturbation", derivative_test_perturbation_, prefix);
    139     options.GetNumericValue("derivative_test_tol", derivative_test_tol_, prefix);
    140     options.GetBoolValue("derivative_test_print_all", derivative_test_print_all_, prefix);
     128    options.GetNumericValue("derivative_test_perturbation",
     129                            derivative_test_perturbation_, prefix);
     130    options.GetNumericValue("derivative_test_tol",
     131                            derivative_test_tol_, prefix);
     132    options.GetBoolValue("derivative_test_print_all",
     133                         derivative_test_print_all_, prefix);
     134
     135    // The option warm_start_same_structure is registered by OrigIpoptNLP
     136    options.GetBoolValue("warm_start_same_structure",
     137                         warm_start_same_structure_, prefix);
    141138
    142139    // allow TNLP to process some options
     
    171168    }
    172169
     170    if (warm_start_same_structure_) {
     171      ASSERT_EXCEPTION(full_x_, INVALID_WARMSTART,
     172                       "warm_start_same_structure chosen, but TNLPAdapter is called for the first time.");
     173      if (IsValid(jnlst_)) {
     174        jnlst_->Printf(J_DETAILED, J_INITIALIZATION,
     175                       "Reusing previous information for warm start in TNLPAdapter.\n");
     176      }
     177    }
     178    else {
     179      // In case the Adapter has been used before, but this is not a
     180      // warm start, make sure we delete all previously allocated
     181      // memory
     182      delete [] full_x_;
     183      delete [] full_lambda_;
     184      delete [] full_g_;
     185      delete [] jac_g_;
     186      delete [] c_rhs_;
     187      delete [] jac_idx_map_;
     188      delete [] h_idx_map_;
     189      delete [] x_fixed_map_;
     190    }
     191
    173192    // Get the full dimensions of the problem
    174     TNLP::IndexStyleEnum index_style = TNLP::FORTRAN_STYLE;
    175     tnlp_->get_nlp_info(n_full_x_, n_full_g_, nz_full_jac_g_,
    176                         nz_full_h_, index_style);
    177 
    178     // create space to store vectors that are the full length of x
    179     full_x_ = new Number[n_full_x_];
    180     //    full_grad_x_ = new Number[n_full_x_];
    181 
    182     // create space to store vectors that area the full length of lambda
    183     full_lambda_ = new Number[n_full_g_];
    184 
    185     // create space to store vectors that are the full length of g
    186     full_g_ = new Number[n_full_g_];
    187 
    188     // allocate internal space to store the full jacobian
    189     jac_g_ = new Number[nz_full_jac_g_];
    190 
    191     //*********************************************************
    192     // Create the spaces and permutation spaces
    193     //*********************************************************
    194     /* Spaces for x, x_L, and x_U. We need to remove the fixed variables
    195      * and find out which bounds do not exist. */
    196     Number* x_l = new Number[n_full_x_];
    197     Number* x_u = new Number[n_full_x_];
    198     Number* g_l = new Number[n_full_g_];
    199     Number* g_u = new Number[n_full_g_];
    200     tnlp_->get_bounds_info(n_full_x_, x_l, x_u, n_full_g_, g_l, g_u);
    201 
    202     Index n_x_not_fixed = 0;
    203     Index n_x_fixed = 0;
    204     Index n_x_l = 0;
    205     Index n_x_u = 0;
    206     Index* x_not_fixed_map = new Index[n_full_x_];
    207     Index* x_l_map = new Index[n_full_x_];
    208     Index* x_u_map = new Index[n_full_x_];
    209 
    210     for (Index i=0; i<n_full_x_; i++) {
    211       Number lower_bound = x_l[i];
    212       Number upper_bound = x_u[i];
    213       if (lower_bound == upper_bound) {
    214         // Variable is fixed, remove it from the problem
    215         //      DBG_ASSERT(false && "ToDo: write code to handle fixed variables. For now, remove the fixed variable from the problem or ensure an interior.\n");
    216         n_x_fixed++;
    217         full_x_[i] = lower_bound;
    218       }
    219       else if (lower_bound > upper_bound) {
    220         char string[128];
    221         sprintf(string, "There are inconsistent bounds on variable %d: lower = %25.16e and upper = %25.16e.", i, lower_bound, upper_bound);
    222         THROW_EXCEPTION(INVALID_TNLP, string);
    223       }
    224       else {
    225         x_not_fixed_map[n_x_not_fixed] = i;
    226         if (lower_bound > nlp_lower_bound_inf_) {
    227           x_l_map[n_x_l] = n_x_not_fixed;
    228           n_x_l++;
    229           if (max_onesided_bound_slack_>0. &&
    230               upper_bound >= nlp_upper_bound_inf_) {
    231             // Add very large upper bound if only a lower bound is given
     193    TNLP::IndexStyleEnum index_style;
     194    Index n_full_x, n_full_g, nz_full_jac_g, nz_full_h;
     195    tnlp_->get_nlp_info(n_full_x, n_full_g, nz_full_jac_g,
     196                        nz_full_h, index_style);
     197    ASSERT_EXCEPTION(!warm_start_same_structure_ ||
     198                     (n_full_x == n_full_x_ &&
     199                      n_full_g == n_full_g_ &&
     200                      nz_full_jac_g == nz_full_jac_g_ &&
     201                      nz_full_h == nz_full_h_),
     202                     INVALID_WARMSTART,
     203                     "warm_start_same_structure chosen, but problem dimensions are different.");
     204    n_full_x_ = n_full_x;
     205    n_full_g_ = n_full_g;
     206    nz_full_jac_g_ = nz_full_jac_g;
     207    nz_full_h_ = nz_full_h;
     208
     209    if (!warm_start_same_structure_) {
     210      // create space to store vectors that are the full length of x
     211      full_x_ = new Number[n_full_x_];
     212
      213      // create space to store vectors that are the full length of lambda
     214      full_lambda_ = new Number[n_full_g_];
     215
     216      // create space to store vectors that are the full length of g
     217      full_g_ = new Number[n_full_g_];
     218
     219      // allocate internal space to store the full jacobian
     220      jac_g_ = new Number[nz_full_jac_g_];
     221
     222      //*********************************************************
     223      // Create the spaces and permutation spaces
     224      //*********************************************************
     225      /* Spaces for x, x_L, and x_U. We need to remove the fixed variables
     226       * and find out which bounds do not exist. */
     227      Number* x_l = new Number[n_full_x_];
     228      Number* x_u = new Number[n_full_x_];
     229      Number* g_l = new Number[n_full_g_];
     230      Number* g_u = new Number[n_full_g_];
     231      tnlp_->get_bounds_info(n_full_x_, x_l, x_u, n_full_g_, g_l, g_u);
     232
     233      Index n_x_not_fixed = 0;
     234      Index n_x_l = 0;
     235      Index n_x_u = 0;
     236      Index* x_not_fixed_map = new Index[n_full_x_];
     237      Index* x_fixed_map_tmp = new Index[n_full_x_];
     238      Index* x_l_map = new Index[n_full_x_];
     239      Index* x_u_map = new Index[n_full_x_];
     240      n_x_fixed_ = 0;
     241
     242      for (Index i=0; i<n_full_x_; i++) {
     243        Number lower_bound = x_l[i];
     244        Number upper_bound = x_u[i];
     245        if (lower_bound == upper_bound) {
     246          // Variable is fixed, remove it from the problem
     247          full_x_[i] = lower_bound;
     248          x_fixed_map_tmp[n_x_fixed_] = i;
     249          n_x_fixed_++;
     250        }
     251        else if (lower_bound > upper_bound) {
     252          char string[128];
     253          sprintf(string, "There are inconsistent bounds on variable %d: lower = %25.16e and upper = %25.16e.", i, lower_bound, upper_bound);
     254          THROW_EXCEPTION(INVALID_TNLP, string);
     255        }
     256        else {
     257          x_not_fixed_map[n_x_not_fixed] = i;
     258          if (lower_bound > nlp_lower_bound_inf_) {
     259            x_l_map[n_x_l] = n_x_not_fixed;
     260            n_x_l++;
     261          }
     262
     263          if (upper_bound < nlp_upper_bound_inf_) {
    232264            x_u_map[n_x_u] = n_x_not_fixed;
    233265            n_x_u++;
    234266          }
    235         }
    236 
    237         if (upper_bound < nlp_upper_bound_inf_) {
    238           x_u_map[n_x_u] = n_x_not_fixed;
    239           n_x_u++;
    240           if (max_onesided_bound_slack_>0. &&
    241               lower_bound <= nlp_lower_bound_inf_) {
    242             // Add very small lower bound if only a lower bound is given
    243             x_l_map[n_x_l] = n_x_not_fixed;
    244             n_x_l++;
     267          n_x_not_fixed++;
     268        }
     269      }
     270
     271      // If there are fixed variables, we keep their position around
     272      // for a possible warm start later
     273      if (n_x_fixed_>0) {
     274        x_fixed_map_ = new Index[n_x_fixed_];
     275        for (Index i=0; i<n_x_fixed_; i++) {
     276          x_fixed_map_[i] = x_fixed_map_tmp[i];
     277        }
     278      }
     279      else {
     280        x_fixed_map_ = NULL;
     281      }
     282      delete [] x_fixed_map_tmp;
     283
     284      x_space_ = new DenseVectorSpace(n_x_not_fixed);
     285      x_l_space_ = new DenseVectorSpace(n_x_l);
     286      x_u_space_ = new DenseVectorSpace(n_x_u);
     287
     288      P_x_full_x_space_ = new ExpansionMatrixSpace(n_full_x_, n_x_not_fixed, x_not_fixed_map);
     289      P_x_full_x_ = P_x_full_x_space_->MakeNewExpansionMatrix();
     290
     291      P_x_x_L_space_ = new ExpansionMatrixSpace(n_x_not_fixed, n_x_l, x_l_map);
     292      px_l_space_ = GetRawPtr(P_x_x_L_space_);
     293      P_x_x_L_ = P_x_x_L_space_->MakeNewExpansionMatrix();
     294      P_x_x_U_space_ = new ExpansionMatrixSpace(n_x_not_fixed, n_x_u, x_u_map);
     295      px_u_space_ = GetRawPtr(P_x_x_U_space_);
     296      P_x_x_U_ = P_x_x_U_space_->MakeNewExpansionMatrix();
     297
     298      delete [] x_l;
     299      x_l = NULL;
     300      delete [] x_u;
     301      x_u = NULL;
     302      delete [] x_not_fixed_map;
     303      x_not_fixed_map = NULL;
     304      delete [] x_l_map;
     305      x_l_map = NULL;
     306      delete [] x_u_map;
     307      x_u_map = NULL;
     308
     309      // Create the spaces for c and d
     310      // - includes the internal permutation matrices for
     311      //  full_g to c and d
     312      // - includes the permutation matrices for d_l and d_u
     313      // c(x) = (P_c)T * g(x)
     314      // d(x) = (P_d)T * g(x)
     315      // d_L = (P_d_L)T * (P_d)T * g_l
     316      // d_U = (P_d_U)T * (P_d)T * g_u
     317      Index n_c = 0;
     318      Index n_d = 0;
     319      Index n_d_l = 0;
     320      Index n_d_u = 0;
     321
     322      Index* c_map = new Index[n_full_g_]; // we do not know n_c yet!
     323      Index* d_map = new Index[n_full_g_]; // we do not know n_d yet!
     324      Index* d_l_map = new Index[n_full_g_]; // "
     325      Index* d_u_map = new Index[n_full_g_]; // "
     326      for (Index i=0; i<n_full_g_; i++) {
     327        Number lower_bound = g_l[i];
     328        Number upper_bound = g_u[i];
     329        if (lower_bound == upper_bound) {
     330          // equality constraint
     331          c_map[n_c] = i;
     332          n_c++;
     333        }
     334        else if (lower_bound > upper_bound) {
     335          char string[128];
     336          sprintf(string, "There are inconsistent bounds on constraint %d: lower = %25.16e and upper = %25.16e.", i, lower_bound, upper_bound);
     337          THROW_EXCEPTION(INVALID_TNLP, string);
     338        }
     339        else {
     340          // inequality constraint
     341          d_map[n_d] = i;
     342          if (lower_bound > nlp_lower_bound_inf_) {
     343            d_l_map[n_d_l] = n_d;
     344            n_d_l++;
    245345          }
    246         }
    247         n_x_not_fixed++;
    248       }
    249     }
    250 
    251 
    252     x_space = new DenseVectorSpace(n_x_not_fixed);
    253     x_l_space = new DenseVectorSpace(n_x_l);
    254     x_u_space = new DenseVectorSpace(n_x_u);
    255 
    256     P_x_full_x_space_ = new ExpansionMatrixSpace(n_full_x_, n_x_not_fixed, x_not_fixed_map);
    257     P_x_full_x_ = P_x_full_x_space_->MakeNewExpansionMatrix();
    258 
    259     P_x_x_L_space_ = new ExpansionMatrixSpace(n_x_not_fixed, n_x_l, x_l_map);
    260     px_l_space = GetRawPtr(P_x_x_L_space_);
    261     P_x_x_L_ = P_x_x_L_space_->MakeNewExpansionMatrix();
    262     P_x_x_U_space_ = new ExpansionMatrixSpace(n_x_not_fixed, n_x_u, x_u_map);
    263     px_u_space = GetRawPtr(P_x_x_U_space_);
    264     P_x_x_U_ = P_x_x_U_space_->MakeNewExpansionMatrix();
    265 
    266     delete [] x_l;
    267     x_l = NULL;
    268     delete [] x_u;
    269     x_u = NULL;
    270     delete [] x_not_fixed_map;
    271     x_not_fixed_map = NULL;
    272     delete [] x_l_map;
    273     x_l_map = NULL;
    274     delete [] x_u_map;
    275     x_u_map = NULL;
    276 
    277     // Create the spaces for c and d
    278     // - includes the internal permutation matrices for
    279     //  full_g to c and d
    280     // - includes the permutation matrices for d_l and d_u
    281     // c(x) = (P_c)T * g(x)
    282     // d(x) = (P_d)T * g(x)
    283     // d_L = (P_d_L)T * (P_d)T * g_l
    284     // d_U = (P_d_U)T * (P_d)T * g_u
    285     Index n_c = 0;
    286     Index n_d = 0;
    287     Index n_d_l = 0;
    288     Index n_d_u = 0;
    289 
    290     Index* c_map = new Index[n_full_g_]; // we do not know n_c yet!
    291     Index* d_map = new Index[n_full_g_]; // we do not know n_d yet!
    292     Index* d_l_map = new Index[n_full_g_]; // "
    293     Index* d_u_map = new Index[n_full_g_]; // "
    294     for (Index i=0; i<n_full_g_; i++) {
    295       Number lower_bound = g_l[i];
    296       Number upper_bound = g_u[i];
    297       if (lower_bound == upper_bound) {
    298         // equality constraint
    299         c_map[n_c] = i;
    300         n_c++;
    301       }
    302       else if (lower_bound > upper_bound) {
    303         char string[128];
    304         sprintf(string, "There are inconsistent bounds on constraint %d: lower = %25.16e and upper = %25.16e.", i, lower_bound, upper_bound);
    305         THROW_EXCEPTION(INVALID_TNLP, string);
    306       }
    307       else {
    308         // inequality constraint
    309         d_map[n_d] = i;
    310         if (lower_bound > nlp_lower_bound_inf_) {
    311           d_l_map[n_d_l] = n_d;
    312           n_d_l++;
    313           if (max_onesided_bound_slack_>0. &&
    314               upper_bound >= nlp_upper_bound_inf_) {
    315             // Add very large upper bound if only a lower bound is given
     346          if (upper_bound < nlp_upper_bound_inf_) {
    316347            d_u_map[n_d_u] = n_d;
    317348            n_d_u++;
    318349          }
    319         }
    320         if (upper_bound < nlp_upper_bound_inf_) {
    321           d_u_map[n_d_u] = n_d;
    322           n_d_u++;
    323           if (max_onesided_bound_slack_>0. &&
    324               lower_bound <= nlp_lower_bound_inf_) {
    325             // Add very small lower bound if only a lower bound is given
    326             d_l_map[n_d_l] = n_d;
    327             n_d_l++;
    328           }
    329         }
    330         n_d++;
    331       }
    332     }
    333 
    334     // create the required c_space
    335     SmartPtr<DenseVectorSpace> dc_space = new DenseVectorSpace(n_c);
    336     c_rhs_ = new Number[n_c];
    337     c_space = GetRawPtr(dc_space);
    338     // create the internal expansion matrix for c to g
    339     P_c_g_space_ = new ExpansionMatrixSpace(n_full_g_, n_c, c_map);
    340     P_c_g_ = P_c_g_space_->MakeNewExpansionMatrix();
    341     delete [] c_map;
    342     c_map = NULL;
    343 
    344     // create the required d_space
    345     d_space = new DenseVectorSpace(n_d);
    346     // create the internal expansion matrix for d to g
    347     P_d_g_space_ = new ExpansionMatrixSpace(n_full_g_, n_d, d_map);
    348     P_d_g_ = P_d_g_space_->MakeNewExpansionMatrix();
    349     delete [] d_map;
    350     d_map = NULL;
    351 
    352     // create the required d_l space
    353     d_l_space = new DenseVectorSpace(n_d_l);
    354     // create the required expansion matrix for d_L to d_L_exp
    355     pd_l_space = new ExpansionMatrixSpace(n_d, n_d_l, d_l_map);
    356     delete [] d_l_map;
    357     d_l_map = NULL;
    358 
    359     // create the required d_u space
    360     d_u_space = new DenseVectorSpace(n_d_u);
    361     // create the required expansion matrix for d_U to d_U_exp
    362     pd_u_space = new ExpansionMatrixSpace(n_d, n_d_u, d_u_map);
    363     delete [] d_u_map;
    364     d_u_map = NULL;
    365 
    366     delete [] g_l;
    367     g_l = NULL;
    368     delete [] g_u;
    369     g_u = NULL;
    370 
    371     /** Create the matrix space for the jacobians
    372      */
    373     // Get the non zero structure
    374     Index* g_iRow = new Index[nz_full_jac_g_];
    375     Index* g_jCol = new Index[nz_full_jac_g_];
    376     tnlp_->eval_jac_g(n_full_x_, NULL, false, n_full_g_, nz_full_jac_g_,
    377                       g_iRow, g_jCol, NULL);
    378 
    379     if (index_style != TNLP::FORTRAN_STYLE) {
     350          n_d++;
     351        }
     352      }
     353
     354      // create the required c_space
     355      SmartPtr<DenseVectorSpace> dc_space = new DenseVectorSpace(n_c);
     356      c_rhs_ = new Number[n_c];
     357      c_space_ = GetRawPtr(dc_space);
     358      // create the internal expansion matrix for c to g
     359      P_c_g_space_ = new ExpansionMatrixSpace(n_full_g_, n_c, c_map);
     360      P_c_g_ = P_c_g_space_->MakeNewExpansionMatrix();
     361      delete [] c_map;
     362      c_map = NULL;
     363
     364      // create the required d_space
     365      d_space_ = new DenseVectorSpace(n_d);
     366      // create the internal expansion matrix for d to g
     367      P_d_g_space_ = new ExpansionMatrixSpace(n_full_g_, n_d, d_map);
     368      P_d_g_ = P_d_g_space_->MakeNewExpansionMatrix();
     369      delete [] d_map;
     370      d_map = NULL;
     371
     372      // create the required d_l space
     373      d_l_space_ = new DenseVectorSpace(n_d_l);
     374      // create the required expansion matrix for d_L to d_L_exp
     375      pd_l_space_ = new ExpansionMatrixSpace(n_d, n_d_l, d_l_map);
     376      delete [] d_l_map;
     377      d_l_map = NULL;
     378
     379      // create the required d_u space
     380      d_u_space_ = new DenseVectorSpace(n_d_u);
     381      // create the required expansion matrix for d_U to d_U_exp
     382      pd_u_space_ = new ExpansionMatrixSpace(n_d, n_d_u, d_u_map);
     383      delete [] d_u_map;
     384      d_u_map = NULL;
     385
     386      delete [] g_l;
     387      g_l = NULL;
     388      delete [] g_u;
     389      g_u = NULL;
     390
     391      /** Create the matrix space for the jacobians
     392       */
     393      // Get the non zero structure
     394      Index* g_iRow = new Index[nz_full_jac_g_];
     395      Index* g_jCol = new Index[nz_full_jac_g_];
     396      tnlp_->eval_jac_g(n_full_x_, NULL, false, n_full_g_, nz_full_jac_g_,
     397                        g_iRow, g_jCol, NULL);
     398
     399      if (index_style != TNLP::FORTRAN_STYLE) {
     400        for (Index i=0; i<nz_full_jac_g_; i++) {
     401          g_iRow[i] += 1;
     402          g_jCol[i] += 1;
     403        }
     404      }
     405      const Index* c_col_pos = P_x_full_x_->CompressedPosIndices();
     406      const Index* c_row_pos = P_c_g_->CompressedPosIndices();
     407
     408      // ... build the non-zero structure for jac_c
     409      // ... (the permutation from rows in jac_g to jac_c is
     410      // ...  the same as P_c_g_)
     411      jac_idx_map_ = new Index[nz_full_jac_g_];
     412      Index* jac_c_iRow = new Index[nz_full_jac_g_];
     413      Index* jac_c_jCol = new Index[nz_full_jac_g_];
     414      Index current_nz = 0;
    380415      for (Index i=0; i<nz_full_jac_g_; i++) {
    381         g_iRow[i] += 1;
    382         g_jCol[i] += 1;
    383       }
    384     }
    385     const Index* c_col_pos = P_x_full_x_->CompressedPosIndices();
    386     const Index* c_row_pos = P_c_g_->CompressedPosIndices();
    387 
    388     // ... build the non-zero structure for jac_c
    389     // ... (the permutation from rows in jac_g to jac_c is
    390     // ...  the same as P_c_g_)
    391     jac_idx_map_ = new Index[nz_full_jac_g_];
    392     Index* jac_c_iRow = new Index[nz_full_jac_g_];
    393     Index* jac_c_jCol = new Index[nz_full_jac_g_];
    394     Index current_nz = 0;
    395     for (Index i=0; i<nz_full_jac_g_; i++) {
    396       const Index c_row = c_row_pos[g_iRow[i]-1];
    397       const Index c_col = c_col_pos[g_jCol[i]-1];
    398       if (c_col != -1 && c_row != -1) {
    399         jac_idx_map_[current_nz] = i;
    400         jac_c_iRow[current_nz] = c_row + 1;
    401         jac_c_jCol[current_nz] = c_col + 1;
    402         current_nz++;
    403       }
    404     }
    405     nz_jac_c_ = current_nz;
    406     Jac_c_space = new GenTMatrixSpace(n_c, n_x_not_fixed, nz_jac_c_, jac_c_iRow, jac_c_jCol);
    407     delete [] jac_c_iRow;
    408     jac_c_iRow = NULL;
    409     delete [] jac_c_jCol;
    410     jac_c_jCol = NULL;
    411 
    412     const Index* d_col_pos = P_x_full_x_->CompressedPosIndices();
    413     const Index* d_row_pos = P_d_g_->CompressedPosIndices();
    414     // ... build the nonzero structure for jac_d
    415     // ... (the permuation from rows in jac_g to jac_c is the
    416     // ...  the same as P_d_g_)
    417     Index* jac_d_iRow = new Index[nz_full_jac_g_];
    418     Index* jac_d_jCol = new Index[nz_full_jac_g_];
    419     current_nz = 0;
    420     for (Index i=0; i<nz_full_jac_g_; i++) {
    421       const Index d_row = d_row_pos[g_iRow[i]-1];
    422       const Index d_col = d_col_pos[g_jCol[i]-1];
    423       if (d_col != -1 && d_row != -1) {
    424         jac_idx_map_[current_nz + nz_jac_c_] = i;
    425         jac_d_iRow[current_nz] = d_row + 1;
    426         jac_d_jCol[current_nz] = d_col + 1;
    427         current_nz++;
    428       }
    429     }
    430     nz_jac_d_ = current_nz;
    431     Jac_d_space = new GenTMatrixSpace(n_d, n_x_not_fixed, nz_jac_d_, jac_d_iRow, jac_d_jCol);
    432     delete [] jac_d_iRow;
    433     jac_d_iRow = NULL;
    434     delete [] jac_d_jCol;
    435     jac_d_jCol = NULL;
    436 
    437     delete [] g_iRow;
    438     g_iRow = NULL;
    439     delete [] g_jCol;
    440     g_jCol = NULL;
    441 
    442     /** Create the matrix space for the hessian of the lagrangian */
    443     Index* full_h_iRow = new Index[nz_full_h_];
    444     Index* full_h_jCol = new Index[nz_full_h_];
    445     Index* h_iRow = new Index[nz_full_h_];
    446     Index* h_jCol = new Index[nz_full_h_];
    447     tnlp_->eval_h(n_full_x_, NULL, false, 0, n_full_g_, NULL, false,
    448                   nz_full_h_, full_h_iRow, full_h_jCol, NULL);
    449 
    450     if (index_style != TNLP::FORTRAN_STYLE) {
     416        const Index c_row = c_row_pos[g_iRow[i]-1];
     417        const Index c_col = c_col_pos[g_jCol[i]-1];
     418        if (c_col != -1 && c_row != -1) {
     419          jac_idx_map_[current_nz] = i;
     420          jac_c_iRow[current_nz] = c_row + 1;
     421          jac_c_jCol[current_nz] = c_col + 1;
     422          current_nz++;
     423        }
     424      }
     425      nz_jac_c_ = current_nz;
     426      Jac_c_space_ = new GenTMatrixSpace(n_c, n_x_not_fixed, nz_jac_c_, jac_c_iRow, jac_c_jCol);
     427      delete [] jac_c_iRow;
     428      jac_c_iRow = NULL;
     429      delete [] jac_c_jCol;
     430      jac_c_jCol = NULL;
     431
     432      const Index* d_col_pos = P_x_full_x_->CompressedPosIndices();
     433      const Index* d_row_pos = P_d_g_->CompressedPosIndices();
     434      // ... build the nonzero structure for jac_d
     435      // ... (the permuation from rows in jac_g to jac_c is the
     436      // ...  the same as P_d_g_)
     437      Index* jac_d_iRow = new Index[nz_full_jac_g_];
     438      Index* jac_d_jCol = new Index[nz_full_jac_g_];
     439      current_nz = 0;
     440      for (Index i=0; i<nz_full_jac_g_; i++) {
     441        const Index d_row = d_row_pos[g_iRow[i]-1];
     442        const Index d_col = d_col_pos[g_jCol[i]-1];
     443        if (d_col != -1 && d_row != -1) {
     444          jac_idx_map_[current_nz + nz_jac_c_] = i;
     445          jac_d_iRow[current_nz] = d_row + 1;
     446          jac_d_jCol[current_nz] = d_col + 1;
     447          current_nz++;
     448        }
     449      }
     450      nz_jac_d_ = current_nz;
     451      Jac_d_space_ = new GenTMatrixSpace(n_d, n_x_not_fixed, nz_jac_d_, jac_d_iRow, jac_d_jCol);
     452      delete [] jac_d_iRow;
     453      jac_d_iRow = NULL;
     454      delete [] jac_d_jCol;
     455      jac_d_jCol = NULL;
     456
     457      delete [] g_iRow;
     458      g_iRow = NULL;
     459      delete [] g_jCol;
     460      g_jCol = NULL;
     461
     462      /** Create the matrix space for the hessian of the lagrangian */
     463      Index* full_h_iRow = new Index[nz_full_h_];
     464      Index* full_h_jCol = new Index[nz_full_h_];
     465      Index* h_iRow = new Index[nz_full_h_];
     466      Index* h_jCol = new Index[nz_full_h_];
     467      tnlp_->eval_h(n_full_x_, NULL, false, 0, n_full_g_, NULL, false,
     468                    nz_full_h_, full_h_iRow, full_h_jCol, NULL);
     469
     470      if (index_style != TNLP::FORTRAN_STYLE) {
     471        for (Index i=0; i<nz_full_h_; i++) {
     472          full_h_iRow[i] += 1;
     473          full_h_jCol[i] += 1;
     474        }
     475      }
     476
     477      h_idx_map_ = new Index[nz_full_h_];
     478      const Index* h_pos = P_x_full_x_->CompressedPosIndices();
     479      current_nz = 0;
    451480      for (Index i=0; i<nz_full_h_; i++) {
    452         full_h_iRow[i] += 1;
    453         full_h_jCol[i] += 1;
    454       }
    455     }
    456 
    457     h_idx_map_ = new Index[nz_full_h_];
    458     const Index* h_pos = P_x_full_x_->CompressedPosIndices();
    459     current_nz = 0;
    460     for (Index i=0; i<nz_full_h_; i++) {
    461       const Index h_row = h_pos[full_h_iRow[i]-1];
    462       const Index h_col = h_pos[full_h_jCol[i]-1];
    463       if (h_row != -1 && h_col != -1) {
    464         h_idx_map_[current_nz] = i;
    465         h_iRow[current_nz] = h_row + 1;
    466         h_jCol[current_nz] = h_col + 1;
    467         current_nz++;
    468       }
    469     }
    470     nz_h_ = current_nz;
    471     Hess_lagrangian_space = new SymTMatrixSpace(n_x_not_fixed, nz_h_, h_iRow, h_jCol);
    472     delete [] full_h_iRow;
    473     full_h_iRow = NULL;
    474     delete [] full_h_jCol;
    475     full_h_jCol = NULL;
    476     delete [] h_iRow;
    477     h_iRow = NULL;
    478     delete [] h_jCol;
    479     h_jCol = NULL;
     481        const Index h_row = h_pos[full_h_iRow[i]-1];
     482        const Index h_col = h_pos[full_h_jCol[i]-1];
     483        if (h_row != -1 && h_col != -1) {
     484          h_idx_map_[current_nz] = i;
     485          h_iRow[current_nz] = h_row + 1;
     486          h_jCol[current_nz] = h_col + 1;
     487          current_nz++;
     488        }
     489      }
     490      nz_h_ = current_nz;
     491      Hess_lagrangian_space_ = new SymTMatrixSpace(n_x_not_fixed, nz_h_, h_iRow, h_jCol);
     492      delete [] full_h_iRow;
     493      full_h_iRow = NULL;
     494      delete [] full_h_jCol;
     495      full_h_jCol = NULL;
     496      delete [] h_iRow;
     497      h_iRow = NULL;
     498      delete [] h_jCol;
     499      h_jCol = NULL;
     500    } /* if (warm_start_same_structure_) { */
     501
     502    // Assign the spaces to the returned pointers
     503    x_space = x_space_;
     504    c_space = c_space_;
     505    d_space = d_space_;
     506    x_l_space = x_l_space_;
     507    px_l_space = px_l_space_;
     508    x_u_space = x_u_space_;
     509    px_u_space = px_u_space_;
     510    d_l_space = d_l_space_;
     511    pd_l_space = pd_l_space_;
     512    d_u_space = d_u_space_;
     513    pd_u_space = pd_u_space_;
     514    Jac_c_space = Jac_c_space_;
     515    Jac_d_space = Jac_d_space_;
     516    Hess_lagrangian_space = Hess_lagrangian_space_;
    480517
    481518    return true;
     
    494531    // once to setup the structure for the problem, I could store the values
    495532    // and use them here ?
     533    // Actually, this is better for a warm start
    496534    Number* x_l = new Number[n_full_x_];
    497535    Number* x_u = new Number[n_full_x_];
     
    499537    Number* g_u = new Number[n_full_g_];
    500538    tnlp_->get_bounds_info(n_full_x_, x_l, x_u, n_full_g_, g_l, g_u);
     539
     540    // Set the values of fixed variables
     541    for (Index i=0; i<n_x_fixed_; i++) {
     542      DBG_ASSERT(x_l[x_fixed_map_[i]] == x_u[x_fixed_map_[i]]);
     543      full_x_[x_fixed_map_[i]] = x_l[x_fixed_map_[i]];
     544    }
    501545
    502546    // Set the bounds values for x
     
    510554      Index full_idx = P_x_full_x_->ExpandedPosIndices()[ipopt_idx];
    511555      Number lower_bound = x_l[full_idx];
    512       if (lower_bound <= nlp_lower_bound_inf_) {
    513         DBG_ASSERT(max_onesided_bound_slack_>0.);
    514         DBG_ASSERT(x_u[full_idx] < nlp_upper_bound_inf_);
    515         values[i] = x_u[full_idx] - max_onesided_bound_slack_;
    516       }
    517       else {
    518         values[i] = lower_bound;
    519       }
     556      values[i] = lower_bound;
    520557    }
    521558
     
    529566      Index full_idx = P_x_full_x_->ExpandedPosIndices()[ipopt_idx];
    530567      Number upper_bound = x_u[full_idx];
    531       if (upper_bound >= nlp_upper_bound_inf_) {
    532         DBG_ASSERT(max_onesided_bound_slack_>0.);
    533         DBG_ASSERT(x_l[full_idx] > nlp_lower_bound_inf_);
    534         values[i] = x_l[full_idx] + max_onesided_bound_slack_;
    535       }
    536       else {
    537         values[i] = upper_bound;
    538       }
     568      values[i] = upper_bound;
    539569    }
    540570
     
    558588      Index full_idx = P_d_g_->ExpandedPosIndices()[d_exp_idx];
    559589      Number lower_bound = g_l[full_idx];
    560       if (lower_bound <= nlp_lower_bound_inf_) {
    561         DBG_ASSERT(max_onesided_bound_slack_>0.);
    562         DBG_ASSERT(g_u[full_idx] < nlp_upper_bound_inf_);
    563         values[i] = g_u[full_idx] - max_onesided_bound_slack_;
    564       }
    565       else {
    566         values[i] = lower_bound;
    567       }
     590      values[i] = lower_bound;
    568591    }
    569592
     
    577600      Index full_idx = P_d_g_->ExpandedPosIndices()[d_exp_idx];
    578601      Number upper_bound = g_u[full_idx];
    579       if (upper_bound >= nlp_upper_bound_inf_) {
    580         DBG_ASSERT(max_onesided_bound_slack_>0.);
    581         DBG_ASSERT(g_l[full_idx] > nlp_lower_bound_inf_);
    582         values[i] = g_l[full_idx] + max_onesided_bound_slack_;
    583       }
    584       else {
    585         values[i] = upper_bound;
    586       }
     602      values[i] = upper_bound;
    587603    }
    588604
     
    11281144
    11291145    ASSERT_EXCEPTION(IsValid(jnlst_), ERROR_IN_TNLP_DERIVATIVE_TEST,
    1130                      "No Journalist given to TNLPAdapter.  Need Journalist, otherwise can't produce any output!");
     1146                     "No Journalist given to TNLPAdapter.  Need Journalist, otherwise can't produce any output in DerivativeChecker!");
    11311147
    11321148    bool retval = true;
     
    11401156    Index nz_jac_g; // number of nonzeros in constraint Jacobian
    11411157    Index nz_hess_lag; // number of nonzeros in Lagrangian Hessian
    1142     TNLP::IndexStyleEnum index_style = TNLP::FORTRAN_STYLE;
     1158    TNLP::IndexStyleEnum index_style;
    11431159    tnlp_->get_nlp_info(nx, ng, nz_jac_g, nz_hess_lag, index_style);
    11441160
  • branches/dev/Interfaces/IpTNLPAdapter.hpp

    r515 r517  
    151151    bool CheckDerivatives(DerivativeTestEnum deriv_test);
    152152
    153     /** Methods for IpoptType */
     153    /** @name Methods for IpoptType */
    154154    //@{
    155155    static void RegisterOptions(SmartPtr<RegisteredOptions> roptions);
    156156    //@}
    157157
     158    /** Accessor method for the underlying TNLP. */
     159    SmartPtr<TNLP> tnlp() const
     160    {
     161      return tnlp_;
     162    }
    158163
    159164  private:
     
    186191    /** Value for a upper bound that denotes infinity */
    187192    Number nlp_upper_bound_inf_;
    188     /** Maximal slack for one-sidedly bounded variables.  If a
     193    /* Maximal slack for one-sidedly bounded variables.  If a
    189194     *  variable has only one bound, say a lower bound xL, then an
    190195     *  upper bound xL + max_onesided_bound_slack_.  If this value is
    191196     *  zero, no upper bound is added. */
    192     Number max_onesided_bound_slack_;
     197    /* Took this out:  Number max_onesided_bound_slack_; */
    193198    /** Enum indicating whether and which derivative test should be
    194199     *  performed at starting point. */
     
    202207     *  those violating the threshold. */
    203208    bool derivative_test_print_all_;
     209    /** Flag indicating whether the TNLP with identical structure has
     210     *  already been solved before. */
     211    bool warm_start_same_structure_;
    204212    //@}
    205213
    206214    /**@name Problem Size Data */
    207215    //@{
    208     Index n_full_x_; /** full dimension of x (fixed + non-fixed) */
    209     Index n_full_g_; /** full dimension of g (c + d) */
    210     Index nz_jac_c_; /** non-zeros of the jacobian of c */
    211     Index nz_jac_d_; /** non-zeros of the jacobian of d */
    212     Index nz_full_jac_g_; /** number of non-zeros in full-size Jacobian of g */
    213     Index nz_full_h_; /** number of non-zeros in full-size Hessian */
    214     Index nz_h_;     /** number of non-zeros in the non-fixed-size Hessian */
     216    /** full dimension of x (fixed + non-fixed) */
     217    Index n_full_x_;
     218    /** full dimension of g (c + d) */
     219    Index n_full_g_;
     220    /** non-zeros of the jacobian of c */
     221    Index nz_jac_c_;
     222    /** non-zeros of the jacobian of d */
     223    Index nz_jac_d_;
     224    /** number of non-zeros in full-size Jacobian of g */
     225    Index nz_full_jac_g_;
     226    /** number of non-zeros in full-size Hessian */
     227    Index nz_full_h_;
     228    /** number of non-zeros in the non-fixed-size Hessian */
     229    Index nz_h_;
     230    /** Number of fixed variables */
     231    Index n_x_fixed_;
     232    //@}
     233
     234    /** @name Local copy of spaces (for warm start) */
     235    //@{
     236    SmartPtr<const VectorSpace> x_space_;
     237    SmartPtr<const VectorSpace> c_space_;
     238    SmartPtr<const VectorSpace> d_space_;
     239    SmartPtr<const VectorSpace> x_l_space_;
     240    SmartPtr<const MatrixSpace> px_l_space_;
     241    SmartPtr<const VectorSpace> x_u_space_;
     242    SmartPtr<const MatrixSpace> px_u_space_;
     243    SmartPtr<const VectorSpace> d_l_space_;
     244    SmartPtr<const MatrixSpace> pd_l_space_;
     245    SmartPtr<const VectorSpace> d_u_space_;
     246    SmartPtr<const MatrixSpace> pd_u_space_;
     247    SmartPtr<const MatrixSpace> Jac_c_space_;
     248    SmartPtr<const MatrixSpace> Jac_d_space_;
     249    SmartPtr<const SymMatrixSpace> Hess_lagrangian_space_;
    215250    //@}
    216251
     
    271306    Index* jac_idx_map_;
    272307    Index* h_idx_map_;
     308
     309    /** Position of fixed variables. This is required for a warm start */
     310    Index* x_fixed_map_;
    273311    //@}
    274312  };
Note: See TracChangeset for help on using the changeset viewer.