1 | // Copyright (C) 2004, 2005 International Business Machines and others. |
---|
2 | // All Rights Reserved. |
---|
3 | // This code is published under the Common Public License. |
---|
4 | // |
---|
5 | // $Id: IpOrigIpoptNLP.cpp 526 2005-09-27 23:03:20Z andreasw $ |
---|
6 | // |
---|
7 | // Authors: Carl Laird, Andreas Waechter IBM 2004-08-13 |
---|
8 | |
---|
9 | #include "IpOrigIpoptNLP.hpp" |
---|
10 | |
---|
11 | #ifdef HAVE_CMATH |
---|
12 | # include <cmath> |
---|
13 | #else |
---|
14 | # ifdef HAVE_MATH_H |
---|
15 | # include <math.h> |
---|
16 | # else |
---|
17 | # error "don't have header file for math" |
---|
18 | # endif |
---|
19 | #endif |
---|
20 | |
---|
21 | #ifdef HAVE_CASSERT |
---|
22 | # include <cassert> |
---|
23 | #else |
---|
24 | # ifdef HAVE_ASSERT_H |
---|
25 | # include <assert.h> |
---|
26 | # else |
---|
27 | # error "don't have header file for assert" |
---|
28 | # endif |
---|
29 | #endif |
---|
30 | |
---|
namespace Ipopt
{
#ifdef IP_DEBUG
  // Verbosity level used by the DBG_* debugging macros in this
  // translation unit (0 = quietest).
  static const Index dbg_verbosity = 0;
#endif
// Constructor.  Stores the journalist, the user's NLP, and the scaling
// object; sizes each result cache to hold a single entry (only the most
// recent evaluation point is kept); and zeros all evaluation counters.
// Initialize() must be called before the object is used (see the
// DBG_ASSERT(initialized_) in InitializeStructures()).
OrigIpoptNLP::OrigIpoptNLP(const SmartPtr<const Journalist>& jnlst,
                           const SmartPtr<NLP>& nlp,
                           const SmartPtr<NLPScalingObject>& nlp_scaling)
    :
    IpoptNLP(nlp_scaling),
    jnlst_(jnlst),
    nlp_(nlp),
    x_space_(NULL),
    f_cache_(1),        // each cache keeps only the latest result
    grad_f_cache_(1),
    c_cache_(1),
    jac_c_cache_(1),
    d_cache_(1),
    jac_d_cache_(1),
    h_cache_(1),
    f_evals_(0),
    grad_f_evals_(0),
    c_evals_(0),
    jac_c_evals_(0),
    d_evals_(0),
    jac_d_evals_(0),
    h_evals_(0),
    initialized_(false) // set to true in Initialize()
{}
// Destructor.  All members are SmartPtr or value types, so nothing
// needs to be released explicitly.
OrigIpoptNLP::~OrigIpoptNLP()
{}
65 | void OrigIpoptNLP::RegisterOptions(SmartPtr<RegisteredOptions> roptions) |
---|
66 | { |
---|
67 | roptions->AddLowerBoundedNumberOption( |
---|
68 | "bound_relax_factor", |
---|
69 | "Factor for initial relaxation of the bounds.", |
---|
70 | 0, false, |
---|
71 | 1e-8, |
---|
72 | "Before start of the optimization, the bounds given by the user are " |
---|
73 | "relaxed. This option sets the factor for this relaxation. If it " |
---|
74 | "is set to zero, then then bounds relaxation is disabled. " |
---|
75 | "(See Eqn.(35) in implmentation paper.)"); |
---|
76 | roptions->AddStringOption2( |
---|
77 | "warm_start_same_structure", |
---|
78 | "Indicates whether a problem with a structure identical to the previous one is to be solved.", |
---|
79 | "no", |
---|
80 | "no", "Assume this is a new problem.", |
---|
81 | "yes", "Assume this is problem has known structure", |
---|
82 | "If \"yes\" is chosen, then the algorithm assumes that an NLP is now to " |
---|
83 | "be solved, whose strcture is identical to one that already was " |
---|
84 | "considered (with the same NLP object)."); |
---|
85 | } |
---|
86 | |
---|
// Read this class' options, reset the evaluation counters, and
// invalidate the cache entries that were stored against a dummy (NULL)
// dependency for zero-dimensional spaces, so that a repeated solve does
// not reuse results from the previous problem.  Returns false if the
// underlying NLP rejects the options; otherwise delegates to the base
// class Initialize().
bool OrigIpoptNLP::Initialize(const Journalist& jnlst,
                              const OptionsList& options,
                              const std::string& prefix)
{
  options.GetNumericValue("bound_relax_factor", bound_relax_factor_, prefix);
  options.GetBoolValue("warm_start_same_structure",
                       warm_start_same_structure_, prefix);

  // Reset the function evaluation counters (for warm start)
  f_evals_=0;
  grad_f_evals_=0;
  c_evals_=0;
  jac_c_evals_=0;
  d_evals_=0;
  jac_d_evals_=0;
  h_evals_=0;

  // Reset the cache entries belonging to a dummy dependency. This
  // is required for repeated solve, since the cache is not updated
  // if a dimension is zero.  (c()/d()/jac_c()/jac_d() cache their
  // empty results against a NULL dependency in that case.)
  std::vector<const TaggedObject*> deps(1);
  deps[0] = NULL;
  std::vector<Number> sdeps(0);
  c_cache_.InvalidateResult(deps, sdeps);
  d_cache_.InvalidateResult(deps, sdeps);
  jac_c_cache_.InvalidateResult(deps, sdeps);
  jac_d_cache_.InvalidateResult(deps, sdeps);

  // Give the concrete NLP a chance to process its own options.
  if (!nlp_->ProcessOptions(options, prefix)) {
    return false;
  }

  initialized_ = true;
  return IpoptNLP::Initialize(jnlst, options, prefix);
}
// Query the NLP for its vector/matrix spaces (unless this is a warm
// start with identical structure), determine the problem scaling,
// build the relaxed and scaled bound vectors, and create and fill the
// iterate vectors x, y_c, y_d, z_L, z_U, v_L, v_U.  The init_* flags
// indicate which iterates the NLP is asked to initialize; initialized
// iterates are converted into the internal scaled space before
// returning.  Returns false if the NLP fails to provide spaces,
// bounds, or a starting point.
bool OrigIpoptNLP::InitializeStructures(SmartPtr<Vector>& x,
                                        bool init_x,
                                        SmartPtr<Vector>& y_c,
                                        bool init_y_c,
                                        SmartPtr<Vector>& y_d,
                                        bool init_y_d,
                                        SmartPtr<Vector>& z_L,
                                        bool init_z_L,
                                        SmartPtr<Vector>& z_U,
                                        bool init_z_U,
                                        SmartPtr<Vector>& v_L,
                                        SmartPtr<Vector>& v_U
                                       )
{
  DBG_ASSERT(initialized_);
  bool retValue;

  if (!warm_start_same_structure_) {

    // Ask the NLP for all spaces (variables, constraints, bounds,
    // projection matrices, Jacobians, Hessian).
    retValue = nlp_->GetSpaces(x_space_, c_space_, d_space_,
                               x_l_space_, px_l_space_,
                               x_u_space_, px_u_space_,
                               d_l_space_, pd_l_space_,
                               d_u_space_, pd_u_space_,
                               jac_c_space_, jac_d_space_,
                               h_space_);

    if (!retValue) {
      return false;
    }

    // Let the scaling object compute its scaling factors and the
    // corresponding scaled matrix spaces.
    NLP_scaling()->DetermineScaling(x_space_,
                                    c_space_, d_space_,
                                    jac_c_space_, jac_d_space_,
                                    h_space_,
                                    scaled_jac_c_space_, scaled_jac_d_space_,
                                    scaled_h_space_);

    // There must be at least as many variables as equality constraints.
    ASSERT_EXCEPTION(x_space_->Dim() >= c_space_->Dim(), TOO_FEW_DOF,
                     "Too few degrees of freedom!");

    // cannot have any null pointers, want zero length vectors
    // instead of null - this will later need to be changed for _h;
    retValue = (IsValid(x_space_) && IsValid(c_space_) && IsValid(d_space_)
                && IsValid(x_l_space_) && IsValid(px_l_space_)
                && IsValid(x_u_space_) && IsValid(px_u_space_)
                && IsValid(d_u_space_) && IsValid(pd_u_space_)
                && IsValid(d_l_space_) && IsValid(pd_l_space_)
                && IsValid(jac_c_space_) && IsValid(jac_d_space_)
                && IsValid(h_space_)
                && IsValid(scaled_jac_c_space_)
                && IsValid(scaled_jac_d_space_)
                && IsValid(scaled_h_space_));

    DBG_ASSERT(retValue && "Model cannot return null vector or matrix prototypes or spaces,"
               " please return zero length vectors instead");
  }
  else {
    // Warm start with identical structure: the spaces from the
    // previous solve must already be in place.
    ASSERT_EXCEPTION(IsValid(x_space_), INVALID_WARMSTART,
                     "OrigIpoptNLP called with warm_start_same_structure, but the problem is solved for the first time.");
  }

  // Create the bounds structures
  SmartPtr<Vector> x_L = x_l_space_->MakeNew();
  SmartPtr<Matrix> Px_L = px_l_space_->MakeNew();
  SmartPtr<Vector> x_U = x_u_space_->MakeNew();
  SmartPtr<Matrix> Px_U = px_u_space_->MakeNew();
  SmartPtr<Vector> d_L = d_l_space_->MakeNew();
  SmartPtr<Matrix> Pd_L = pd_l_space_->MakeNew();
  SmartPtr<Vector> d_U = d_u_space_->MakeNew();
  SmartPtr<Matrix> Pd_U = pd_u_space_->MakeNew();

  retValue = nlp_->GetBoundsInformation(*Px_L, *x_L, *Px_U, *x_U,
                                        *Pd_L, *d_L, *Pd_U, *d_U);

  if (!retValue) {
    return false;
  }

  // Relax the bounds outward: lower bounds are decreased, upper
  // bounds increased by bound_relax_factor_ * max(1, |bound|).
  relax_bounds(-bound_relax_factor_, *x_L);
  relax_bounds( bound_relax_factor_, *x_U);
  relax_bounds(-bound_relax_factor_, *d_L);
  relax_bounds( bound_relax_factor_, *d_U);

  x_L_ = ConstPtr(x_L);
  Px_L_ = ConstPtr(Px_L);
  x_U_ = ConstPtr(x_U);
  Px_U_ = ConstPtr(Px_U);
  d_L_ = ConstPtr(d_L);
  Pd_L_ = ConstPtr(Pd_L);
  d_U_ = ConstPtr(d_U);
  Pd_U_ = ConstPtr(Pd_U);

  // now create and store the scaled bounds
  x_L_ = NLP_scaling()->apply_vector_scaling_x_LU(*Px_L_, x_L_, *x_space_);
  x_U_ = NLP_scaling()->apply_vector_scaling_x_LU(*Px_U_, x_U_, *x_space_);
  d_L_ = NLP_scaling()->apply_vector_scaling_d_LU(*Pd_L_, d_L_, *d_space_);
  d_U_ = NLP_scaling()->apply_vector_scaling_d_LU(*Pd_U_, d_U_, *d_space_);

  // Create the iterates structures
  x = x_space_->MakeNew();
  y_c = c_space_->MakeNew();
  y_d = d_space_->MakeNew();
  z_L = x_l_space_->MakeNew();
  z_U = x_u_space_->MakeNew();
  v_L = d_l_space_->MakeNew();
  v_U = d_u_space_->MakeNew();

  // Ask the NLP for the (unscaled) starting point; only the iterates
  // whose init_* flag is true need to be filled.  Note that v_L/v_U
  // are not initialized by the NLP.
  retValue = nlp_->GetStartingPoint(GetRawPtr(x), init_x,
                                    GetRawPtr(y_c), init_y_c,
                                    GetRawPtr(y_d), init_y_d,
                                    GetRawPtr(z_L), init_z_L,
                                    GetRawPtr(z_U), init_z_U);

  if (!retValue) {
    return false;
  }

  // Convert the user-provided starting point into the internal scaled
  // space.  The multipliers additionally pick up the objective scaling
  // factor (they are dual quantities).
  Number obj_scal = NLP_scaling()->apply_obj_scaling(1.);
  if (init_x) {
    if (NLP_scaling()->have_x_scaling()) {
      x = NLP_scaling()->apply_vector_scaling_x_NonConst(ConstPtr(x));
    }
  }
  if (init_y_c) {
    if (NLP_scaling()->have_c_scaling()) {
      y_c = NLP_scaling()->unapply_vector_scaling_c_NonConst(ConstPtr(y_c));
    }
    if (obj_scal!=1.) {
      y_c->Scal(obj_scal);
    }
  }
  if (init_y_d) {
    if (NLP_scaling()->have_d_scaling()) {
      y_d = NLP_scaling()->unapply_vector_scaling_d_NonConst(ConstPtr(y_d));
    }
    if (obj_scal!=1.) {
      y_d->Scal(obj_scal);
    }
  }
  if (init_z_L) {
    if (NLP_scaling()->have_x_scaling()) {
      z_L = NLP_scaling()->apply_vector_scaling_x_LU_NonConst(*Px_L_, ConstPtr(z_L), *x_space_);
    }
    if (obj_scal!=1.) {
      z_L->Scal(obj_scal);
    }
  }
  if (init_z_U) {
    if (NLP_scaling()->have_x_scaling()) {
      z_U = NLP_scaling()->apply_vector_scaling_x_LU_NonConst(*Px_U_, ConstPtr(z_U), *x_space_);
    }
    if (obj_scal!=1.) {
      z_U->Scal(obj_scal);
    }
  }

  return true;
}
283 | void |
---|
284 | OrigIpoptNLP::relax_bounds(Number bound_relax_factor, Vector& bounds) |
---|
285 | { |
---|
286 | DBG_START_METH("OrigIpoptNLP::relax_bounds", dbg_verbosity); |
---|
287 | if (bound_relax_factor!=0.) { |
---|
288 | SmartPtr<Vector> tmp = bounds.MakeNew(); |
---|
289 | tmp->Copy(bounds); |
---|
290 | tmp->ElementWiseAbs(); |
---|
291 | SmartPtr<Vector> ones = bounds.MakeNew(); |
---|
292 | ones->Set(1.); |
---|
293 | tmp->ElementWiseMax(*ones); |
---|
294 | DBG_PRINT((1, "bound_relax_factor = %e", bound_relax_factor)); |
---|
295 | DBG_PRINT_VECTOR(2, "tmp", *tmp); |
---|
296 | DBG_PRINT_VECTOR(2, "bounds before", bounds); |
---|
297 | bounds.Axpy(bound_relax_factor, *tmp); |
---|
298 | DBG_PRINT_VECTOR(2, "bounds after", bounds); |
---|
299 | } |
---|
300 | } |
---|
301 | |
---|
// Scaled objective function value at the (scaled) point x.  The result
// is cached against x, so a repeated call at the same point does not
// re-evaluate the user's function.  Throws Eval_Error if the
// evaluation fails or returns a non-finite value.
Number OrigIpoptNLP::f(const Vector& x)
{
  DBG_START_METH("OrigIpoptNLP::f", dbg_verbosity);
  Number ret = 0.0;
  DBG_PRINT((2, "x.Tag = %d\n", x.GetTag()));
  if (!f_cache_.GetCachedResult1Dep(ret, &x)) {
    // Cache miss: evaluate the user's objective at the unscaled point
    // and time the evaluation.
    f_evals_++;
    SmartPtr<const Vector> unscaled_x = NLP_scaling()->unapply_vector_scaling_x(&x);
    f_eval_time_.Start();
    bool success = nlp_->Eval_f(*unscaled_x, ret);
    f_eval_time_.End();
    DBG_PRINT((1, "success = %d ret = %e\n", success, ret));
    ASSERT_EXCEPTION(success && IsFiniteNumber(ret), Eval_Error,
                     "Error evaluating the objective function");
    // Return the scaled objective value and remember it.
    ret = NLP_scaling()->apply_obj_scaling(ret);
    f_cache_.AddCachedResult1Dep(ret, &x);
  }

  return ret;
}
// Placeholder overload: OrigIpoptNLP does not implement a
// mu-dependent objective, so calling this is a programming error
// (asserts in debug builds, returns 0 otherwise).
Number OrigIpoptNLP::f(const Vector& x, Number mu)
{
  assert(false && "ERROR: This method is only a placeholder for f(mu) and should not be called");
  return 0.;
}
// Scaled objective gradient at the (scaled) point x, cached against x.
// Throws Eval_Error if the evaluation fails or the gradient is not
// finite (checked via its 2-norm).
SmartPtr<const Vector> OrigIpoptNLP::grad_f(const Vector& x)
{
  SmartPtr<Vector> unscaled_grad_f;
  SmartPtr<const Vector> retValue;
  if (!grad_f_cache_.GetCachedResult1Dep(retValue, &x)) {
    // Cache miss: evaluate at the unscaled point, then scale.
    grad_f_evals_++;
    unscaled_grad_f = x_space_->MakeNew();

    SmartPtr<const Vector> unscaled_x = NLP_scaling()->unapply_vector_scaling_x(&x);
    grad_f_eval_time_.Start();
    bool success = nlp_->Eval_grad_f(*unscaled_x, *unscaled_grad_f);
    grad_f_eval_time_.End();
    ASSERT_EXCEPTION(success && IsFiniteNumber(unscaled_grad_f->Nrm2()),
                     Eval_Error, "Error evaluating the gradient of the objective function");
    retValue = NLP_scaling()->apply_grad_obj_scaling(ConstPtr(unscaled_grad_f));
    grad_f_cache_.AddCachedResult1Dep(retValue, &x);
  }

  return retValue;
}
// Placeholder overload: OrigIpoptNLP does not implement a
// mu-dependent objective gradient, so calling this is a programming
// error (asserts in debug builds, returns NULL otherwise).
SmartPtr<const Vector> OrigIpoptNLP::grad_f(const Vector& x, Number mu)
{
  assert(false && "ERROR: This method is only a placeholder for grad_f(mu) and should not be called");
  return NULL;
}
/** Equality constraint residual */
// Scaled equality-constraint values at the (scaled) point x, cached
// against x.  For a problem without equality constraints an empty
// vector is created once and cached against a NULL dependency.  Throws
// Eval_Error if the evaluation fails or the result is not finite.
SmartPtr<const Vector> OrigIpoptNLP::c(const Vector& x)
{
  SmartPtr<const Vector> retValue;
  if (c_space_->Dim()==0) {
    // We do this caching of an empty vector so that the returned
    // Vector has always the same tag (this might make a difference
    // in cases where only the constraints are supposed to change...
    SmartPtr<const Vector> dep = NULL;
    if (!c_cache_.GetCachedResult1Dep(retValue, GetRawPtr(dep))) {
      retValue = c_space_->MakeNew();
      c_cache_.AddCachedResult1Dep(retValue, GetRawPtr(dep));
    }
  }
  else {
    if (!c_cache_.GetCachedResult1Dep(retValue, x)) {
      // Cache miss: evaluate at the unscaled point, then scale.
      SmartPtr<Vector> unscaled_c = c_space_->MakeNew();
      c_evals_++;
      SmartPtr<const Vector> unscaled_x = NLP_scaling()->unapply_vector_scaling_x(&x);
      c_eval_time_.Start();
      bool success = nlp_->Eval_c(*unscaled_x, *unscaled_c);
      c_eval_time_.End();
      ASSERT_EXCEPTION(success && IsFiniteNumber(unscaled_c->Nrm2()),
                       Eval_Error, "Error evaluating the equality constraints");
      retValue = NLP_scaling()->apply_vector_scaling_c(ConstPtr(unscaled_c));
      c_cache_.AddCachedResult1Dep(retValue, x);
    }
  }

  return retValue;
}
// Scaled inequality-constraint values at the (scaled) point x, cached
// against x.  For a problem without inequality constraints an empty
// vector is created once and cached against a NULL dependency.  Throws
// Eval_Error if the evaluation fails or the result is not finite.
SmartPtr<const Vector> OrigIpoptNLP::d(const Vector& x)
{
  DBG_START_METH("OrigIpoptNLP::d", dbg_verbosity);
  SmartPtr<const Vector> retValue;
  if (d_space_->Dim()==0) {
    // We do this caching of an empty vector so that the returned
    // Vector has always the same tag (this might make a difference
    // in cases where only the constraints are supposed to change...
    SmartPtr<const Vector> dep = NULL;
    if (!d_cache_.GetCachedResult1Dep(retValue, GetRawPtr(dep))) {
      retValue = d_space_->MakeNew();
      d_cache_.AddCachedResult1Dep(retValue, GetRawPtr(dep));
    }
  }
  else {
    if (!d_cache_.GetCachedResult1Dep(retValue, x)) {
      // Cache miss: evaluate at the unscaled point, then scale.
      d_evals_++;
      SmartPtr<Vector> unscaled_d = d_space_->MakeNew();

      DBG_PRINT_VECTOR(2, "scaled_x", x);
      SmartPtr<const Vector> unscaled_x = NLP_scaling()->unapply_vector_scaling_x(&x);
      d_eval_time_.Start();
      bool success = nlp_->Eval_d(*unscaled_x, *unscaled_d);
      d_eval_time_.End();
      DBG_PRINT_VECTOR(2, "unscaled_d", *unscaled_d);
      ASSERT_EXCEPTION(success && IsFiniteNumber(unscaled_d->Nrm2()),
                       Eval_Error, "Error evaluating the inequality constraints");
      retValue = NLP_scaling()->apply_vector_scaling_d(ConstPtr(unscaled_d));
      d_cache_.AddCachedResult1Dep(retValue, x);
    }
  }

  return retValue;
}
// Scaled Jacobian of the equality constraints at the (scaled) point x,
// cached against x.  For a problem without equality constraints an
// empty (but scaled) matrix is created once and cached against a NULL
// dependency.  Throws Eval_Error if the evaluation fails.
SmartPtr<const Matrix> OrigIpoptNLP::jac_c(const Vector& x)
{
  SmartPtr<const Matrix> retValue;
  if (c_space_->Dim()==0) {
    // We do this caching of an empty vector so that the returned
    // Matrix has always the same tag
    SmartPtr<const Vector> dep = NULL;
    if (!jac_c_cache_.GetCachedResult1Dep(retValue, GetRawPtr(dep))) {
      SmartPtr<Matrix> unscaled_jac_c = jac_c_space_->MakeNew();
      retValue = NLP_scaling()->apply_jac_c_scaling(ConstPtr(unscaled_jac_c));
      jac_c_cache_.AddCachedResult1Dep(retValue, GetRawPtr(dep));
    }
  }
  else {
    if (!jac_c_cache_.GetCachedResult1Dep(retValue, x)) {
      // Cache miss: evaluate at the unscaled point, then scale.
      jac_c_evals_++;
      SmartPtr<Matrix> unscaled_jac_c = jac_c_space_->MakeNew();

      SmartPtr<const Vector> unscaled_x = NLP_scaling()->unapply_vector_scaling_x(&x);
      jac_c_eval_time_.Start();
      bool success = nlp_->Eval_jac_c(*unscaled_x, *unscaled_jac_c);
      jac_c_eval_time_.End();
      ASSERT_EXCEPTION(success, Eval_Error, "Error evaluating the jacobian of the equality constraints");
      retValue = NLP_scaling()->apply_jac_c_scaling(ConstPtr(unscaled_jac_c));
      jac_c_cache_.AddCachedResult1Dep(retValue, x);
    }
  }

  return retValue;
}
// Scaled Jacobian of the inequality constraints at the (scaled) point
// x, cached against x.  For a problem without inequality constraints
// an empty (but scaled) matrix is created once and cached against a
// NULL dependency.  Throws Eval_Error if the evaluation fails.
SmartPtr<const Matrix> OrigIpoptNLP::jac_d(const Vector& x)
{
  SmartPtr<const Matrix> retValue;
  if (d_space_->Dim()==0) {
    // We do this caching of an empty vector so that the returned
    // Matrix has always the same tag
    SmartPtr<const Vector> dep = NULL;
    if (!jac_d_cache_.GetCachedResult1Dep(retValue, GetRawPtr(dep))) {
      SmartPtr<Matrix> unscaled_jac_d = jac_d_space_->MakeNew();
      retValue = NLP_scaling()->apply_jac_d_scaling(ConstPtr(unscaled_jac_d));
      jac_d_cache_.AddCachedResult1Dep(retValue, GetRawPtr(dep));
    }
  }
  else {
    if (!jac_d_cache_.GetCachedResult1Dep(retValue, x)) {
      // Cache miss: evaluate at the unscaled point, then scale.
      jac_d_evals_++;
      SmartPtr<Matrix> unscaled_jac_d = jac_d_space_->MakeNew();

      SmartPtr<const Vector> unscaled_x = NLP_scaling()->unapply_vector_scaling_x(&x);
      jac_d_eval_time_.Start();
      bool success = nlp_->Eval_jac_d(*unscaled_x, *unscaled_jac_d);
      jac_d_eval_time_.End();
      ASSERT_EXCEPTION(success, Eval_Error, "Error evaluating the jacobian of the inequality constraints");
      retValue = NLP_scaling()->apply_jac_d_scaling(ConstPtr(unscaled_jac_d));
      jac_d_cache_.AddCachedResult1Dep(retValue, x);
    }
  }

  return retValue;
}
// Scaled Hessian of the Lagrangian at the (scaled) point x for the
// given objective factor and (scaled) multipliers yc, yd.  The result
// is cached against the triple (x, yc, yd) plus the scalar obj_factor.
// Throws Eval_Error if the evaluation fails.
SmartPtr<const SymMatrix> OrigIpoptNLP::h(const Vector& x,
                                          Number obj_factor,
                                          const Vector& yc,
                                          const Vector& yd)
{
  // Cache key: three tagged dependencies plus one scalar dependency.
  std::vector<const TaggedObject*> deps(3);
  deps[0] = &x;
  deps[1] = &yc;
  deps[2] = &yd;
  std::vector<Number> scalar_deps(1);
  scalar_deps[0] = obj_factor;

  SmartPtr<SymMatrix> unscaled_h;
  SmartPtr<const SymMatrix> retValue;
  if (!h_cache_.GetCachedResult(retValue, deps, scalar_deps)) {
    h_evals_++;
    unscaled_h = h_space_->MakeNewSymMatrix();

    // Transform the inputs for the user's Eval_h: x is unscaled, while
    // the multipliers and obj_factor go through the apply_* scaling
    // (dual quantities use the opposite direction from the primals).
    SmartPtr<const Vector> unscaled_x = NLP_scaling()->unapply_vector_scaling_x(&x);
    SmartPtr<const Vector> unscaled_yc = NLP_scaling()->apply_vector_scaling_c(&yc);
    SmartPtr<const Vector> unscaled_yd = NLP_scaling()->apply_vector_scaling_d(&yd);
    Number scaled_obj_factor = NLP_scaling()->apply_obj_scaling(obj_factor);
    h_eval_time_.Start();
    bool success = nlp_->Eval_h(*unscaled_x, scaled_obj_factor, *unscaled_yc, *unscaled_yd, *unscaled_h);
    h_eval_time_.End();
    ASSERT_EXCEPTION(success, Eval_Error, "Error evaluating the hessian of the lagrangian");
    retValue = NLP_scaling()->apply_hessian_scaling(ConstPtr(unscaled_h));
    h_cache_.AddCachedResult(retValue, deps, scalar_deps);
  }

  return retValue;
}
518 | SmartPtr<const SymMatrix> OrigIpoptNLP::h(const Vector& x, |
---|
519 | Number obj_factor, |
---|
520 | const Vector& yc, |
---|
521 | const Vector& yd, |
---|
522 | Number mu) |
---|
523 | { |
---|
524 | assert(false && |
---|
525 | "ERROR: This method is only a for h(mu) and should not be called"); |
---|
526 | return NULL; |
---|
527 | } |
---|
528 | |
---|
529 | |
---|
// Hand out the spaces determined in InitializeStructures().  The
// Jacobian and Hessian spaces returned are the *scaled* ones produced
// by the scaling object; all other spaces are returned as obtained
// from the NLP.  Must not be called before InitializeStructures().
void OrigIpoptNLP::GetSpaces(SmartPtr<const VectorSpace>& x_space,
                             SmartPtr<const VectorSpace>& c_space,
                             SmartPtr<const VectorSpace>& d_space,
                             SmartPtr<const VectorSpace>& x_l_space,
                             SmartPtr<const MatrixSpace>& px_l_space,
                             SmartPtr<const VectorSpace>& x_u_space,
                             SmartPtr<const MatrixSpace>& px_u_space,
                             SmartPtr<const VectorSpace>& d_l_space,
                             SmartPtr<const MatrixSpace>& pd_l_space,
                             SmartPtr<const VectorSpace>& d_u_space,
                             SmartPtr<const MatrixSpace>& pd_u_space,
                             SmartPtr<const MatrixSpace>& Jac_c_space,
                             SmartPtr<const MatrixSpace>& Jac_d_space,
                             SmartPtr<const SymMatrixSpace>& Hess_lagrangian_space)
{
  // Make sure that we already have all the pointers
  DBG_ASSERT(IsValid(x_space_) &&
             IsValid(c_space_) &&
             IsValid(d_space_) &&
             IsValid(x_l_space_) &&
             IsValid(px_l_space_) &&
             IsValid(x_u_space_) &&
             IsValid(px_u_space_) &&
             IsValid(d_l_space_) &&
             IsValid(pd_l_space_) &&
             IsValid(d_u_space_) &&
             IsValid(pd_u_space_) &&
             IsValid(scaled_jac_c_space_) &&
             IsValid(scaled_jac_d_space_) &&
             IsValid(scaled_h_space_));

  DBG_ASSERT(IsValid(NLP_scaling()));

  x_space = x_space_;
  c_space = c_space_;
  d_space = d_space_;
  x_l_space = x_l_space_;
  px_l_space = px_l_space_;
  x_u_space = x_u_space_;
  px_u_space = px_u_space_;
  d_l_space = d_l_space_;
  pd_l_space = pd_l_space_;
  d_u_space = d_u_space_;
  pd_u_space = pd_u_space_;
  // Note: callers receive the scaled matrix spaces here.
  Jac_c_space = scaled_jac_c_space_;
  Jac_d_space = scaled_jac_d_space_;
  Hess_lagrangian_space = scaled_h_space_;
}
// Convert the final (scaled) iterates back into the user's original
// space and forward them to the NLP's FinalizeSolution.  The bound
// multipliers z_L/z_U are mapped through the Px projection matrices
// into full x-space; the multipliers are transformed with the apply_*
// scaling direction and, if the objective was scaled, additionally
// multiplied by the inverse objective scaling factor.
void OrigIpoptNLP::FinalizeSolution(SolverReturn status,
                                    const Vector& x, const Vector& z_L, const Vector& z_U,
                                    const Vector& c, const Vector& d,
                                    const Vector& y_c, const Vector& y_d,
                                    Number obj_value)
{
  // need to submit the unscaled solution back to the nlp
  SmartPtr<const Vector> unscaled_x =
    NLP_scaling()->unapply_vector_scaling_x(&x);
  SmartPtr<const Vector> unscaled_c =
    NLP_scaling()->unapply_vector_scaling_c(&c);
  SmartPtr<const Vector> unscaled_d =
    NLP_scaling()->unapply_vector_scaling_d(&d);
  const Number unscaled_obj = NLP_scaling()->unapply_obj_scaling(obj_value);

  SmartPtr<const Vector> unscaled_z_L;
  SmartPtr<const Vector> unscaled_z_U;
  SmartPtr<const Vector> unscaled_y_c;
  SmartPtr<const Vector> unscaled_y_d;

  // The objective function scaling factor also appears in the constraints
  Number obj_unscale_factor = NLP_scaling()->unapply_obj_scaling(1.);
  if (obj_unscale_factor!=1.) {
    // Objective was scaled: undo its effect on each multiplier vector.
    SmartPtr<Vector> tmp = NLP_scaling()->apply_vector_scaling_x_LU_NonConst(*Px_L_, &z_L, *x_space_);
    tmp->Scal(obj_unscale_factor);
    unscaled_z_L = ConstPtr(tmp);

    tmp = NLP_scaling()->apply_vector_scaling_x_LU_NonConst(*Px_U_, &z_U, *x_space_);
    tmp->Scal(obj_unscale_factor);
    unscaled_z_U = ConstPtr(tmp);

    tmp = NLP_scaling()->apply_vector_scaling_c_NonConst(&y_c);
    tmp->Scal(obj_unscale_factor);
    unscaled_y_c = ConstPtr(tmp);

    tmp = NLP_scaling()->apply_vector_scaling_d_NonConst(&y_d);
    tmp->Scal(obj_unscale_factor);
    unscaled_y_d = ConstPtr(tmp);
  }
  else {
    // No objective scaling: only the vector scalings need undoing.
    unscaled_z_L = NLP_scaling()->apply_vector_scaling_x_LU(*Px_L_, &z_L, *x_space_);
    unscaled_z_U = NLP_scaling()->apply_vector_scaling_x_LU(*Px_U_, &z_U, *x_space_);
    unscaled_y_c = NLP_scaling()->apply_vector_scaling_c(&y_c);
    unscaled_y_d = NLP_scaling()->apply_vector_scaling_d(&y_d);
  }

  nlp_->FinalizeSolution(status, *unscaled_x,
                         *unscaled_z_L, *unscaled_z_U,
                         *unscaled_c, *unscaled_d,
                         *unscaled_y_c, *unscaled_y_d,
                         unscaled_obj);
}
632 | void OrigIpoptNLP::AdjustVariableBounds(const Vector& new_x_L, const Vector& new_x_U, |
---|
633 | const Vector& new_d_L, const Vector& new_d_U) |
---|
634 | { |
---|
635 | x_L_ = new_x_L.MakeNewCopy(); |
---|
636 | x_U_ = new_x_U.MakeNewCopy(); |
---|
637 | d_L_ = new_d_L.MakeNewCopy(); |
---|
638 | d_U_ = new_d_U.MakeNewCopy(); |
---|
639 | } |
---|
640 | |
---|
641 | void |
---|
642 | OrigIpoptNLP::PrintTimingStatistics( |
---|
643 | Journalist& jnlst, |
---|
644 | EJournalLevel level, |
---|
645 | EJournalCategory category) const |
---|
646 | { |
---|
647 | if (!jnlst.ProduceOutput(level, category)) |
---|
648 | return; |
---|
649 | |
---|
650 | jnlst.Printf(level, category, |
---|
651 | "Function Evaluations................: %10.2f\n", |
---|
652 | f_eval_time_.TotalTime()+ |
---|
653 | c_eval_time_.TotalTime()+ |
---|
654 | d_eval_time_.TotalTime()+ |
---|
655 | jac_c_eval_time_.TotalTime()+ |
---|
656 | jac_d_eval_time_.TotalTime()+ |
---|
657 | h_eval_time_.TotalTime()); |
---|
658 | jnlst.Printf(level, category, |
---|
659 | " Objective function.................: %10.2f\n", |
---|
660 | f_eval_time_.TotalTime()); |
---|
661 | jnlst.Printf(level, category, |
---|
662 | " Equality constraints...............: %10.2f\n", |
---|
663 | c_eval_time_.TotalTime()); |
---|
664 | jnlst.Printf(level, category, |
---|
665 | " Inequality constraints.............: %10.2f\n", |
---|
666 | d_eval_time_.TotalTime()); |
---|
667 | jnlst.Printf(level, category, |
---|
668 | " Equality constraint Jacobian.......: %10.2f\n", |
---|
669 | jac_c_eval_time_.TotalTime()); |
---|
670 | jnlst.Printf(level, category, |
---|
671 | " Inequality constraint Jacobian.....: %10.2f\n", |
---|
672 | jac_d_eval_time_.TotalTime()); |
---|
673 | jnlst.Printf(level, category, |
---|
674 | " Lagrangian Hessian.................: %10.2f\n", |
---|
675 | h_eval_time_.TotalTime()); |
---|
676 | } |
---|
677 | |
---|
678 | } // namespace Ipopt |
---|