# source:coopr.pysp/trunk/coopr/pysp/phobjective.py@3217

Last change on this file since 3217 was 3217, checked in by jwatson, 11 years ago

Various updates to support heterogeneous index sets in PH for different nodes in the scenario tree - more work / testing remains.

• Property svn:executable set to `*`
File size: 18.1 KB
Line
1#  _________________________________________________________________________
2#
3#  Coopr: A COmmon Optimization Python Repository
4#  Copyright (c) 2010 Sandia Corporation.
5#  This software is distributed under the BSD License.
6#  Under the terms of Contract DE-AC04-94AL85000 with Sandia Corporation,
7#  the U.S. Government retains certain rights in this software.
8#  For more information, see the Coopr README.txt file.
9#  _________________________________________________________________________
10
11# this module contains various utilities for creating PH weighted penalty
12# objectives, e.g., through quadratic or linearized penalty terms.
13
14from math import fabs, log, exp
15from coopr.pyomo import *
16from phutils import indexToString
17
18# IMPT: In general, the breakpoint computation codes can return a 2-list even if the lb equals
19#       the ub. This case happens quite often in real models (although typically lb=xavg=ub).
20#       See the code for constructing the pieces on how this case is handled in the linearization.
21
22#
23# routine to compute linearization breakpoints uniformly between the bounds and the mean.
24#
25
def compute_uniform_breakpoints(lb, node_min, xavg, node_max, ub, num_breakpoints_per_side, tolerance):
   # compute linearization breakpoints spaced uniformly between the lower
   # bound and the mean, and between the mean and the upper bound.
   # node_min / node_max are accepted only for signature compatibility with
   # the other breakpoint strategies - this strategy does not use them.
   # returns the breakpoints in ascending order; lb and ub always appear,
   # so the result can be a 2-list when lb == xavg == ub.

   breakpoints = []

   # add the lower bound - the first breakpoint.
   breakpoints.append(lb)

   # determine the breakpoints to the left of the mean. candidates within
   # tolerance of the lower bound or the mean are skipped, because those
   # two points are added explicitly.
   left_step = (xavg - lb) / num_breakpoints_per_side
   current_x = lb
   for i in range(1,num_breakpoints_per_side+1):
      this_lb = current_x
      if (fabs(this_lb-lb) > tolerance) and (fabs(this_lb-xavg) > tolerance):
         breakpoints.append(this_lb)
      current_x += left_step

   # add the mean - it's always a breakpoint. unless!
   # the lb or ub and the avg are the same.
   if (fabs(lb-xavg) > tolerance) and (fabs(ub-xavg) > tolerance):
      breakpoints.append(xavg)

   # determine the breakpoints to the right of the mean. candidates within
   # tolerance of the mean or the upper bound are skipped.
   right_step = (ub - xavg) / num_breakpoints_per_side
   current_x = xavg
   for i in range(1,num_breakpoints_per_side+1):
      this_ub = current_x+right_step
      if (fabs(this_ub-xavg) > tolerance) and (fabs(this_ub-ub) > tolerance):
         breakpoints.append(this_ub)
      current_x += right_step

   # add the upper bound - the last breakpoint.
   # the upper bound should always be different than the lower bound (I say with some
   # hesitation - it really depends on what plugins are doing to modify the bounds dynamically).
   breakpoints.append(ub)

   return breakpoints
64
65#
66# routine to compute linearization breakpoints uniformly between the current node min/max bounds.
67#
68
def compute_uniform_between_nodestat_breakpoints(lb, node_min, xavg, node_max, ub, num_breakpoints, tolerance):
   # compute linearization breakpoints spaced uniformly between the current
   # node minimum and maximum, bracketed by the variable bounds and the mean.
   # returns the breakpoints in ascending order; lb and ub always appear.

   breakpoints = []

   # add the lower bound - the first breakpoint.
   breakpoints.append(lb)

   # add the node-min - the second breakpoint. but only if it is different than the lower bound and the mean.
   if (fabs(node_min-lb) > tolerance) and (fabs(node_min-xavg) > tolerance):
      breakpoints.append(node_min)

   # uniform interior breakpoints between the node min and max. candidates
   # within tolerance of the node min/max or the mean are skipped, because
   # those points are added explicitly.
   step = (node_max - node_min) / num_breakpoints
   current_x = node_min
   for i in range(1,num_breakpoints+1):
      this_lb = current_x
      if (fabs(this_lb-node_min) > tolerance) and (fabs(this_lb-node_max) > tolerance) and (fabs(this_lb-xavg) > tolerance):
         breakpoints.append(this_lb)
      current_x += step

   # add the node-max - the second-to-last breakpoint. but only if it is different than the upper bound and the mean.
   if (fabs(node_max-ub) > tolerance) and (fabs(node_max-xavg) > tolerance):
      breakpoints.append(node_max)

   # add the upper bound - the last breakpoint.
   breakpoints.append(ub)

   # add the mean - it's always a breakpoint. unless! -
   # it happens to be equal to (within tolerance) the lower or upper bounds.
   # sort to insert it in the correct place.
   if (fabs(xavg - lb) > tolerance) and (fabs(xavg - ub) > tolerance):
      breakpoints.append(xavg)
   breakpoints.sort()

   return breakpoints
104
105#
106# routine to compute linearization breakpoints using "Woodruff" relaxation of the compute_uniform_between_nodestat_breakpoints.
107#
108
def compute_uniform_between_woodruff_breakpoints(lb, node_min, xavg, node_max, ub, num_breakpoints, tolerance):
   # "Woodruff" relaxation of compute_uniform_between_nodestat_breakpoints:
   # instead of the raw node min/max, the uniform range is widened to either
   # three "differences" from the mean or halfway to the bound, whichever is
   # closer to the mean. returns the breakpoints in ascending order.

   breakpoints = []

   # add the lower bound - the first breakpoint.
   breakpoints.append(lb)

   # be either three "differences" from the mean, or else "halfway to the bound", whichever is closer to the mean.
   left = max(xavg - 3.0 * (xavg - node_min), xavg - 0.5 * (xavg - lb))
   right = min(xavg + 3.0 * (node_max - xavg), xavg + 0.5 * (ub - xavg))

   # add the left bound - the second breakpoint. but only if it is different than the lower bound and the mean.
   if (fabs(left-lb) > tolerance) and (fabs(left-xavg) > tolerance):
      breakpoints.append(left)

   # uniform interior breakpoints between left and right. candidates within
   # tolerance of left/right or the mean are skipped, because those points
   # are added explicitly.
   step = (right - left) / num_breakpoints
   current_x = left
   for i in range(1,num_breakpoints+1):
      this_lb = current_x
      if (fabs(this_lb-left) > tolerance) and (fabs(this_lb-right) > tolerance) and (fabs(this_lb-xavg) > tolerance):
         breakpoints.append(this_lb)
      current_x += step

   # add the right bound - the second-to-last breakpoint. but only if it is different than the upper bound and the mean.
   if (fabs(right-ub) > tolerance) and (fabs(right-xavg) > tolerance):
      breakpoints.append(right)

   # add the upper bound - the last breakpoint.
   breakpoints.append(ub)

   # add the mean - it's always a breakpoint.
   # sort to insert it in the correct place.
   breakpoints.append(xavg)
   breakpoints.sort()

   return breakpoints
146
147#
148# routine to compute linearization breakpoints based on an exponential distribution from the mean in each direction.
149#
150
def compute_exponential_from_mean_breakpoints(lb, node_min, xavg, node_max, ub, num_breakpoints_per_side, tolerance):
   # compute linearization breakpoints clustered exponentially around the
   # mean: on each side, the k-th offset from the mean is base**k, where the
   # base is chosen so the final offset lands exactly on the bound. node_min
   # and node_max are accepted for signature compatibility only.

   breakpoints = [lb]  # the lower bound is always the first breakpoint.

   # left of the mean: offsets grow geometrically from base**1 up to
   # base**num_breakpoints_per_side == (xavg - lb). candidates within
   # tolerance of the lower bound or the mean are skipped, since both are
   # appended explicitly.
   growth = exp(log(xavg - lb) / num_breakpoints_per_side)
   offset = growth
   for _ in range(num_breakpoints_per_side):
      candidate = xavg - offset
      if fabs(candidate - lb) > tolerance and fabs(candidate - xavg) > tolerance:
         breakpoints.append(candidate)
      offset *= growth

   # the mean is always a breakpoint.
   breakpoints.append(xavg)

   # right of the mean: same geometric scheme, mirrored, with the final
   # offset equal to (ub - xavg).
   growth = exp(log(ub - xavg) / num_breakpoints_per_side)
   offset = growth
   for _ in range(num_breakpoints_per_side):
      candidate = xavg + offset
      if fabs(candidate - xavg) > tolerance and fabs(candidate - ub) > tolerance:
         breakpoints.append(candidate)
      offset *= growth

   # the upper bound is always the last breakpoint.
   breakpoints.append(ub)

   return breakpoints
185
186#
187# a utility to create piece-wise linear constraint expressions for a given variable, for
188# use in constructing the augmented (penalized) PH objective. lb and ub are the bounds
189# on this piece, variable is the actual instance variable, and average is the instance
190# parameter specifying the average of this variable across instances sharing passing
191# through a common tree node. lb and ub are floats.
192# IMPT: There are cases where lb=ub, in which case the slope is 0 and the intercept
193#       is simply the penalty at the lower(or upper) bound.
194#
195
def create_piecewise_constraint_expression(lb, ub, instance_variable, variable_average, quad_variable, tolerance):
   # build one piece-wise linear constraint expression bounding quad_variable
   # from below by the chord of the quadratic penalty (x - avg)**2 over the
   # piece [lb, ub]. lb and ub are floats; instance_variable is the actual
   # instance variable and variable_average the corresponding PH average
   # parameter. when lb == ub (within tolerance) the piece degenerates: the
   # slope is 0 and the intercept is the penalty at the (common) bound.

   avg = value(variable_average)
   penalty_diff_lb = lb - avg
   penalty_diff_ub = ub - avg
   penalty_at_lb = penalty_diff_lb * penalty_diff_lb
   penalty_at_ub = penalty_diff_ub * penalty_diff_ub

   if fabs(ub - lb) > tolerance:
      slope = (penalty_at_ub - penalty_at_lb) / (ub - lb)
   else:
      slope = 0.0
   intercept = penalty_at_lb - slope * lb

   # constraint form: 0 <= quad_variable - slope * x - intercept (no upper bound),
   # i.e. quad_variable >= slope * x + intercept.
   return (0.0, quad_variable - slope * instance_variable - intercept, None)
209
210#
211# form the baseline objective. really just a wrapper around a clone
212# operation at this point.
213#
214
def form_standard_objective(instance_name, instance, original_objective_expression, scenario_tree):
   # re-install a clone of the original objective expression on the instance,
   # clearing any quadratic sub-expression left over from a prior PH
   # objective formation. really just a wrapper around a clone operation.

   active_objectives = instance.active_components(Objective)
   objective_name = active_objectives.keys()[0]
   objective = active_objectives[objective_name]
   # clone so that repeated augmentations never modify the original expression.
   cloned_expression = original_objective_expression.clone()
   cloned_expression.simplify(instance)
   objective._data[None].expr = cloned_expression
   objective._quad_subexpr = None
224
225#
226# form the penalized PH objective, guided by various options.
227# returns the list of components added to the instance, e.g.,
228# in the case of constraints required to implement linearization.
229#
230
def form_ph_objective(instance_name, instance, original_objective_expression, scenario_tree, \
                      linearize_nonbinary_penalty_terms, drop_proximal_terms, \
                      retain_quadratic_binary_terms, breakpoint_strategy, \
                      tolerance, simplify_expressions):
   # form the penalized PH objective for one scenario instance: clone the
   # original objective, add the linear (w-weighted) term for every blended
   # variable, and add proximal (rho-weighted) penalty terms either as true
   # quadratics or as piece-wise linearizations, guided by the options.
   # returns the list of component names added to the instance (e.g., the
   # piece-wise linearization constraints), so callers can remove them later.

   new_instance_attributes = []

   objective_name = instance.active_components(Objective).keys()[0]
   objective = instance.active_components(Objective)[objective_name]
   # clone the objective, because we're going to augment (repeatedly) the original objective.
   objective_expression = original_objective_expression.clone()
   # the quadratic expression is really treated as just a list - eventually should be treated as a full expression.
   quad_expression = 0.0

   # for each blended variable (i.e., those not appearing in the final stage),
   # add the linear and quadratic penalty terms to the objective.

   for stage in scenario_tree._stages[:-1]: # skip the last stage, as no blending occurs

      # locate the tree node (in this stage) through which this scenario passes.
      # NOTE(review): the break only exits the scenario loop - the node loop
      # keeps scanning; harmless as long as a scenario appears under exactly
      # one node per stage, which the search assumes.
      variable_tree_node = None
      for node in stage._tree_nodes:
         for scenario in node._scenarios:
            if scenario._name == instance_name:
               variable_tree_node = node
               break

      for (reference_variable, index_template) in stage._variables:

         variable_name = reference_variable.name
         variable_type = reference_variable.domain

         # look up the PH parameters previously attached to this instance;
         # the names follow the PHWEIGHT_/PHAVG_/PHRHO_/PHBLEND_ convention.
         w_parameter_name = "PHWEIGHT_"+variable_name
         w_parameter = instance.active_components(Param)[w_parameter_name]

         average_parameter_name = "PHAVG_"+variable_name
         average_parameter = instance.active_components(Param)[average_parameter_name]

         rho_parameter_name = "PHRHO_"+variable_name
         rho_parameter = instance.active_components(Param)[rho_parameter_name]

         blend_parameter_name = "PHBLEND_"+variable_name
         blend_parameter = instance.active_components(Param)[blend_parameter_name]

         # per-node min/max statistics, used only by the breakpoint strategies.
         node_min_parameter = variable_tree_node._minimums[variable_name]
         node_max_parameter = variable_tree_node._maximums[variable_name]

         # the auxiliary variable carrying the linearized quadratic penalty;
         # only present when linearization was requested.
         quad_penalty_term_variable = None
         if linearize_nonbinary_penalty_terms > 0:
            quad_penalty_term_variable_name = "PHQUADPENALTY_"+variable_name
            quad_penalty_term_variable = instance.active_components(Var)[quad_penalty_term_variable_name]

         instance_variable = instance.active_components(Var)[variable_name]

         variable_indices = variable_tree_node._variable_indices[variable_name]

         for index in variable_indices:

            # skip unused and fixed variables - they contribute no penalty.
            if (instance_variable[index].status is not VarStatus.unused) and (instance_variable[index].fixed is False):

               # add the linear (w-weighted) term in a consistent fashion, independent of variable type.
               # if maximizing, here is where you would want "-=" - however, if you do this, the collect/simplify process chokes for reasons currently unknown.
               objective_expression += (value(w_parameter[index]) * instance_variable[index])

               # there are some cases in which a user may want to avoid the proximal term completely.
               # it's of course only a good idea when there are at least bounds (both lb and ub) on
               # the variables to be blended.
               if drop_proximal_terms is False:

                  # deal with binaries
                  if isinstance(variable_type, BooleanSet) is True:

                     if retain_quadratic_binary_terms is False:
                        # for binaries x*x == x, so the quadratic penalty expands to the
                        # exact linear form below. this rather ugly form of the linearized
                        # quadratic expression term is required due to a pyomo bug - the
                        # form (rho/2) * (x+y+z) chokes in presolve when distributing
                        # over the sum.
                        new_term = (value(blend_parameter[index]) * value(rho_parameter[index]) / 2.0 * instance_variable[index]) - \
                                   (value(blend_parameter[index]) * value(rho_parameter[index]) * value(average_parameter[index]) * instance_variable[index]) + \
                                   (value(blend_parameter[index]) * value(rho_parameter[index]) / 2.0 * value(average_parameter[index]) * value(average_parameter[index]))
                        if objective.sense is minimize:
                           objective_expression += new_term
                        else:
                           objective_expression -= new_term
                     else:
                        quad_expression += (blend_parameter[index] * rho_parameter[index] * (instance_variable[index] - average_parameter[index]) ** 2)

                  # deal with everything else
                  else:

                     if linearize_nonbinary_penalty_terms > 0:

                        # the variables are easy - just a single entry.
                        if objective.sense is minimize:
                           objective_expression += (rho_parameter[index] / 2.0 * quad_penalty_term_variable[index])
                        else:
                           objective_expression -= (rho_parameter[index] / 2.0 * quad_penalty_term_variable[index])

                        # the constraints are somewhat nastier.

                        # TBD - DEFINE CONSTRAINTS ON-THE-FLY?? (INDIVIDUALLY NAMED FOR NOW - CREATE INDEX SETS!) - OR A LEAST AN INDEX SET PER "PIECE"
                        xavg = average_parameter[index]
                        x = instance_variable[index]

                        # linearization requires finite bounds on both sides.
                        if x.lb is None or x.ub is None:
                            var = variable_name + indexToString(index)
                            msg = "Missing bound for '%s'\n"                  \
                                  'Both lower and upper bounds required when' \
                                  ' piece-wise approximating quadratic '      \
                                  'penalty terms'
                            raise ValueError, msg % var
                        lb = x.lb()
                        ub = x.ub()

                        node_min = value(node_min_parameter[index])
                        node_max = value(node_max_parameter[index])

                        # compute the breakpoint sequence according to the specified strategy.
                        # strategies: 0=uniform, 1=uniform-between-nodestat,
                        # 2=uniform-between-woodruff, 3=exponential-from-mean.
                        # NOTE(review): an out-of-range breakpoint_strategy raises
                        # IndexError, which this except clause (ValueError) does not
                        # catch; conversely, a ValueError raised inside the strategy
                        # itself would be misreported as "not supported" - confirm intent.
                        try:
                            strategy = (
                              compute_uniform_breakpoints,
                              compute_uniform_between_nodestat_breakpoints,
                              compute_uniform_between_woodruff_breakpoints,
                              compute_exponential_from_mean_breakpoints,
                            )[ breakpoint_strategy ]
                            args = ( lb, node_min, value(xavg), node_max, ub, \
                                linearize_nonbinary_penalty_terms, tolerance )
                            breakpoints = strategy( *args )
                        except ValueError, e:
                            msg = 'A breakpoint distribution strategy (%s) '  \
                                  'is currently not supported within PH!'
                            raise ValueError, msg % breakpoint_strategy

                        # one constraint per piece, bounding the auxiliary penalty
                        # variable from below by the chord of the quadratic penalty.
                        for i in range(0,len(breakpoints)-1):

                           this_lb = breakpoints[i]
                           this_ub = breakpoints[i+1]

                           piece_constraint_name = "QUAD_PENALTY_PIECE_"+str(i)+"_"+variable_name+str(index)
                           if hasattr(instance, piece_constraint_name) is False:
                              # this is the first time the constraint is being added - add it to the list of PH-specific constraints for this instance.
                              new_instance_attributes.append(piece_constraint_name)

                           piece_constraint = Constraint(name=piece_constraint_name)
                           piece_constraint.model = instance
                           piece_expression = create_piecewise_constraint_expression(this_lb, this_ub, x, xavg, \
                                                                                     quad_penalty_term_variable[index], \
                                                                                     tolerance)
                           piece_constraint.add(None, piece_expression)
                           setattr(instance, piece_constraint_name, piece_constraint)

                     else:

                        # no linearization requested - keep the true quadratic penalty.
                        quad_expression += (blend_parameter[index] * rho_parameter[index] * (instance_variable[index] - average_parameter[index]) ** 2)

   # simplification should actually not be required, at least for LPs and MIPs - repeating variables
   # is not problematic in this context. it may be, though, in the land of NLPs. hence, the option.
   if simplify_expressions is True:
      objective_expression.simplify(instance)

   # assign the new expression to the objective.
   instance.active_components(Objective)[objective_name]._data[None].expr = objective_expression

   # if we are linearizing everything, then nothing will appear in the quadratic expression -
   # don't add the empty "0.0" expression to the objective. otherwise, the output file won't
   # be properly generated.
   if quad_expression != 0.0:
     instance.active_components(Objective)[objective_name]._quad_subexpr = quad_expression
   else:
     instance.active_components(Objective)[objective_name]._quad_subexpr = None

   return new_instance_attributes
Note: See TracBrowser for help on using the repository browser.