forked from jump-dev/JuMP.jl
-
Notifications
You must be signed in to change notification settings - Fork 0
/
lp_sensitivity.jl
346 lines (319 loc) · 15 KB
/
lp_sensitivity.jl
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
# Copyright 2019, Iain Dunning, Joey Huchette, Miles Lubin, and contributors
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
#############################################################################
# JuMP
# An algebraic modeling language for Julia
# See http://github.com/JuliaOpt/JuMP.jl
#############################################################################
"""
lp_rhs_perturbation_range(constraint::ConstraintRef;
feasibility_tolerance::Float64)
::Tuple{Float64, Float64}
Gives the range by which the rhs coefficient can change and the current LP basis
remains feasible, i.e., where the shadow prices apply.
## Notes
- The rhs coefficient is the value right of the relation, i.e., b for the constraint when of the form a*x □ b, where □ is ≤, =, or ≥.
- The range denotes valid changes, e.g., for a*x <= b + Δ, the LP basis remains feasible for all Δ ∈ [l, u].
- `feasibility_tolerance` is the primal feasibility tolerance, this should
preferably match the tolerance used by the solver. The default tolerance should
however apply in most situations (c.f. "Computational Techniques of the
Simplex Method" by István Maros, section 9.3.4).
"""
function lp_rhs_perturbation_range(constraint::ConstraintRef{Model, <:_MOICON}; feasibility_tolerance::Float64 = 1e-8)
error("The perturbation range of rhs is not defined or not implemented for this type " *
"of constraint.")
end
"""
Builds the standard form constraint matrix, variable bounds, and a vector of constraint reference, one for each row.
If à is the current constraint matrix, and the corresponding feasible set is of the form
s.t. Row_L <= Ãx <= Row_U,
Var_L <= x <= Var_U.
(Equality constraint is interpreted as, e.g., Row_L_i == Row_U_i,
and single sided constraints with an infinite value in the free directions)
Then the function returns a tuple with
A = [Ã -I]
var_lower = [Var_L, Row_L]
var_upper = [Var_U, Row_U]
affine_constraints::Vector{ConstraintRef} - references corresponding to Row_L <= Ãx <= Row_U
This represents the LP problem feasible set of the form
s.t. Ax == 0
var_lower <= x <= var_upper
"""
function _std_matrix(model::Model)
con_types = list_of_constraint_types(model)
con_func_type = first.(con_types)
if !all(broadcast(<:, AffExpr, con_func_type) .| broadcast(<:, VariableRef, con_func_type))
error("The requested operation is not supported because the problem is not a linear optimization problem.")
end
con_set_type = last.(con_types)
if !all(broadcast(<:, con_set_type, Union{MOI.LessThan, MOI.GreaterThan, MOI.EqualTo, MOI.Interval}))
error("The requested operation is not supported because the problem is not a linear optimization problem.")
end
vars = all_variables(model)
var_to_idx = Dict{VariableRef, Int}()
for (j, var) in enumerate(vars)
var_to_idx[var] = j
end
col_j = Int[]
row_i = Int[]
coeffs = Float64[]
var_lower = fill(-Inf, length(vars))
var_upper = fill(Inf, length(vars))
affine_constraints = ConstraintRef[]
cur_row_idx = 0
for (F, S) in con_types
constraints = all_constraints(model, F, S)
if F <: VariableRef
for constraint in constraints
con_obj = constraint_object(constraint)
j = var_to_idx[con_obj.func]
range = MOI.Interval(con_obj.set)
var_lower[j] = max(var_lower[j], range.lower)
var_upper[j] = min(var_upper[j], range.upper)
end
#issue 1892 makes it hard to know how to handle variable constraints, atm. assume unique variable constraints.
else
for constraint in constraints
cur_row_idx += 1
con_obj = constraint_object(constraint)
con_terms = con_obj.func.terms
push!(affine_constraints, constraint)
for (var, coeff) in con_terms
push!(col_j, var_to_idx[var])
push!(row_i, cur_row_idx)
push!(coeffs, coeff)
end
push!(col_j, cur_row_idx + length(vars))
push!(row_i, cur_row_idx)
push!(coeffs, -1.0)
range = MOI.Interval(con_obj.set)
push!(var_lower, range.lower)
push!(var_upper, range.upper)
end
end
end
A = sparse(row_i, col_j, coeffs, cur_row_idx, cur_row_idx + length(vars))
return A, var_lower, var_upper, affine_constraints
end
"""
Returns the optimal primal values for the problem in standard form, c.f. `_std_matrix(model)`.
"""
function _std_primal_solution(model::Model, constraints::Vector{ConstraintRef})
vars = all_variables(model)
return [value.(vars); value.(constraints)]
end
"""
Returns the basis status of all variables of the problem in standard form, c.f. `_std_matrix(model)`.
"""
function _std_basis_status(model::Model)::Vector{MOI.BasisStatusCode}
con_types = list_of_constraint_types(model)
vars = all_variables(model)
var_to_idx = Dict{VariableRef, Int}()
for (j, var) in enumerate(vars)
var_to_idx[var] = j
end
basis_status = fill(MOI.BASIC, length(vars))
for (F, S) in con_types
constraints = all_constraints(model, F, S)
if F <: VariableRef
for constraint in constraints
con_obj = constraint_object(constraint)
j = var_to_idx[con_obj.func]
try
basis_status[j] = MOI.get(model, MOI.ConstraintBasisStatus(), constraint)
catch e
error("The perturbation range of rhs is not available because the solver doesn't "*
"support ´ConstraintBasisStatus´.")
end
if S <: MOI.LessThan && basis_status[j] == MOI.NONBASIC
basis_status[j] = MOI.NONBASIC_AT_UPPER
elseif S <: MOI.GreaterThan && basis_status[j] == MOI.NONBASIC
basis_status[j] = MOI.NONBASIC_AT_LOWER
end
# ´_std_matrix´ checks that no invalid sets are used.
end
#issue 1892 makes it hard to know how to handle variable constraints, atm. assume unique variable constraints.
else
for constraint in constraints
con_basis_status = MOI.get(model, MOI.ConstraintBasisStatus(), constraint)
if S <: MOI.LessThan && con_basis_status == MOI.NONBASIC
con_basis_status = MOI.NONBASIC_AT_UPPER
elseif S <: MOI.GreaterThan && con_basis_status == MOI.NONBASIC
con_basis_status = MOI.NONBASIC_AT_LOWER
end
push!(basis_status, con_basis_status)
end
end
end
return basis_status
end
# Concrete implementation of `lp_rhs_perturbation_range` for scalar affine and
# single-variable constraints with one-sided or equality sets.
function lp_rhs_perturbation_range(constraint::ConstraintRef{Model, _MOICON{F, S}}; feasibility_tolerance::Float64 = 1e-8
    )::Tuple{Float64, Float64} where {S <: Union{MOI.LessThan, MOI.GreaterThan, MOI.EqualTo}, T, F <: Union{MOI.ScalarAffineFunction{T}, MOI.SingleVariable}}
    model = owner_model(constraint)
    # Ranging information is only meaningful at an optimal basis.
    if termination_status(model) != MOI.OPTIMAL
        error("The perturbation range of rhs is not available because the current solution "*
              "is not optimal.")
    end
    try
        con_status = MOI.get(model, MOI.ConstraintBasisStatus(), constraint)
        # The constraint is inactive.
        if con_status == MOI.BASIC
            # An inactive one-sided constraint can move by its slack toward
            # the binding side and without limit away from it.
            if S <: MOI.LessThan
                return value(constraint) - constraint_object(constraint).set.upper, Inf
            end
            if S <: MOI.GreaterThan
                return -Inf, value(constraint) - constraint_object(constraint).set.lower
            end
            # Equality set with a BASIC status: no slack direction exists,
            # so no perturbation is allowed.
            return 0.0, 0.0
        end
    catch e
        # NOTE(review): this broad catch also masks unrelated failures from
        # `value`/`constraint_object` above — presumably the intent is only to
        # catch an unsupported-attribute error; TODO confirm and narrow.
        error("The perturbation range of rhs is not available because the solver doesn't "*
              "support ´ConstraintBasisStatus´.")
    end
    # The constraint is active.
    # TODO: This could be made more efficient because actually we only need the basic columns.
    A, L, U, constraints = _std_matrix(model)
    var_basis_status = _std_basis_status(model)
    x = _std_primal_solution(model, constraints)
    # Derivation of the ratio test below:
    # Ax = 0 => B*x_B = -N x_N =: b_N
    # Perturb row i by Δ => ̃x_B = B^-1 (b_N + Δ e_i) = x_B + Δ ρ
    # Perturb var bound j by Δ => ̃x_B = B^-1 (b_N - Δ N_j) = x_B + Δ ρ
    # Find min and max values of Δ : L_B <= ̃x_B <= U_B
    # L_B <= x_B + Δ ρ <= U_B -> L_B - x_B <= Δ ρ <= U_B - x_B
    # Δ <= (U_B - x_B)_+ ./ ρ_+, (L_B - x_B)_- ./ p_-
    # Δ >= (U_B - x_B)_- ./ ρ_-, (L_B - x_B)_+ ./ p_+
    basic = var_basis_status .== Ref(MOI.BASIC)
    B = A[:, basic]
    x_B = x[basic]
    local rho::Vector{Float64}
    if F <: MOI.SingleVariable
        # Variable-bound constraint: perturbation direction is -B^-1 N_j where
        # j is the column of the bounded variable.
        var = constraint_object(constraint).func
        j = findfirst(isequal(var), all_variables(model))
        rho = -B \ Vector(A[:, j])
    else
        # Affine row constraint: perturbation direction is B^-1 e_i for row i.
        i = findfirst(isequal(constraint), constraints)
        ei = zeros(size(A, 1))
        ei[i] = 1.0
        rho = B \ ei
    end
    UmX_B = U[basic] - x_B
    LmX_B = L[basic] - x_B
    pos = rho .> 0.0
    neg = rho .< 0.0
    # Find the first basic variable bound that is strictly violated.
    # The `_strict` vectors relax each ratio by the feasibility tolerance so
    # that ties within tolerance are not counted as violations.
    lower_bounds_delta = [-Inf; UmX_B[neg] ./ rho[neg]; LmX_B[pos] ./ rho[pos]]
    lower_bounds_delta_strict = lower_bounds_delta + [0.0; feasibility_tolerance ./ rho[neg]; -feasibility_tolerance ./ rho[pos]]
    upper_bounds_delta = [Inf; UmX_B[pos] ./ rho[pos]; LmX_B[neg] ./ rho[neg]]
    upper_bounds_delta_strict = upper_bounds_delta + [0.0; feasibility_tolerance ./ rho[pos]; -feasibility_tolerance ./ rho[neg]]
    lower_max_idx = last(findmax(lower_bounds_delta_strict))
    upper_min_idx = last(findmin(upper_bounds_delta_strict))
    # Return the unperturbed values
    return lower_bounds_delta[lower_max_idx], upper_bounds_delta[upper_min_idx]
end
"""
Returns the optimal reduced costs for the variables of the problem in standard form, c.f. `_std_matrix(model)`
"""
function _std_reduced_costs(model::Model, constraints::Vector{ConstraintRef})
con_types = list_of_constraint_types(model)
vars = all_variables(model)
var_to_idx = Dict{VariableRef, Int}()
for (j, var) in enumerate(vars)
var_to_idx[var] = j
end
y = [zeros(length(vars)); dual.(constraints)]
for (F, S) in con_types
if F <: VariableRef
constraints = all_constraints(model, F, S)
for constraint in constraints
con_obj = constraint_object(constraint)
j = var_to_idx[con_obj.func]
y[j] += dual(constraint)
end
#issue 1892 makes is hard to know how to handle variable constraints, atm. assume unique variable constraints
end
end
if objective_sense(model) == MOI.MAX_SENSE
y .*= -1.0
end
return y
end
"""
lp_objective_perturbation_range(var::VariableRef;
optimality_tolerance::Float64)
::Tuple{Float64, Float64}
Gives the range by which the cost coefficient can change and the current LP basis
remains optimal, i.e., the reduced costs remain valid.
## Notes
- The range denotes valid changes, Δ ∈ [l, u], for which cost[var] += Δ do not
violate the current optimality conditions.
- `optimality_tolerance` is the dual feasibility tolerance, this should
preferably match the tolerance used by the solver. The defualt tolerance should
however apply in most situations (c.f. "Computational Techniques of the
Simplex Method" by István Maros, section 9.3.4).
"""
function lp_objective_perturbation_range(var::VariableRef; optimality_tolerance::Float64 = 1e-8)::Tuple{Float64, Float64}
model = owner_model(var)
if termination_status(model) != MOI.OPTIMAL
error("The perturbation range of the objective is not available because the current solution "*
"is not optimal.")
end
if objective_sense(model) == MOI.FEASIBILITY_SENSE
error("The perturbation range of the objective is not applicable on feasibility problems.")
end
if !has_duals(model)
error("The perturbation range of the objective is not available because no dual result is " *
"available.")
end
# TODO: This could be made more efficient for non-basic variables by checking them
# before building calling ´_std_matrix´ and ´_std_reduced_costs´ and only check
# the reduced cost of that variable. (issue 1892 makes this somewhat non-trivial)
# The variable is in the basis, for a minimization problem without variable upper bounds we need:
# c_N - N^T B^-T (c_B + Δ e_j ) >= 0
# c_red - Δ N^T (B^-T e_j) = c_red - Δ N^T ρ = c_red - Δ N_red >= 0
# c_red >= Δ N_red
# maximum( c_red_- ./ N_red_- ) <= Δ <= minimum( c_red_+ ./ N_red_+ )
# If upper bounds are present (and active), those inequalities are flipped.
# If the problem is a maximization problem all inequalities are flipped.
vars = all_variables(model)
j = findfirst(isequal(var), vars)
A, var_lower, var_upper, constraints = _std_matrix(model)
var_basis_status = _std_basis_status(model)
c_red = _std_reduced_costs(model, constraints)
if var_basis_status[j] != MOI.BASIC
if var_lower[j] == var_upper[j]
return -Inf, Inf
end
if (var_basis_status[j] == MOI.NONBASIC_AT_LOWER && objective_sense(model) == MOI.MIN_SENSE) ||
(var_basis_status[j] == MOI.NONBASIC_AT_UPPER && objective_sense(model) == MOI.MAX_SENSE)
return -c_red[j], Inf
end
return -Inf, -c_red[j]
end
basic = var_basis_status .== Ref(MOI.BASIC)
upper = var_basis_status[.!basic] .== Ref(MOI.NONBASIC_AT_UPPER)
ej = zeros(size(A, 1))
ej[findfirst(isequal(var), vars[basic[1:length(vars)]])] = 1.0
N_red = A[:, .!basic]' * (A[:, basic]' \ ej)
c_red = c_red[.!basic]
pos = N_red .> 0.0
neg = N_red .< 0.0
in_lb = (neg .& .!upper) .| (pos .& upper)
in_ub = (pos .& .!upper) .| (neg .& upper)
if objective_sense(model) == MOI.MAX_SENSE
in_lb, in_ub = in_ub, in_lb
end
# The reduced cost of variables fixed at equality do not be accounted for (they only change binding direction).
unfixed_vars = (var_lower .< var_upper)[.!basic]
in_lb .&= unfixed_vars
in_ub .&= unfixed_vars
# Find the first reduced cost that is strictly violated
lower_bounds_delta = [-Inf; c_red[in_lb] ./ N_red[in_lb]]
lower_bounds_delta_strict = lower_bounds_delta - [0.0; optimality_tolerance ./ abs.(N_red[in_lb])]
upper_bounds_delta = [Inf; c_red[in_ub] ./ N_red[in_ub]]
upper_bounds_delta_strict = upper_bounds_delta + [0.0; optimality_tolerance ./ abs.(N_red[in_ub])]
lower_max_idx = last(findmax(lower_bounds_delta_strict))
upper_min_idx = last(findmin(upper_bounds_delta_strict))
return lower_bounds_delta[lower_max_idx], upper_bounds_delta[upper_min_idx]
end