forked from google/or-tools
-
Notifications
You must be signed in to change notification settings - Fork 0
/
linear_relaxation.h
192 lines (160 loc) · 8.65 KB
/
linear_relaxation.h
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
// Copyright 2010-2021 Google LLC
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef OR_TOOLS_SAT_LINEAR_RELAXATION_H_
#define OR_TOOLS_SAT_LINEAR_RELAXATION_H_
#include <vector>
#include "ortools/sat/cp_model_mapping.h"
#include "ortools/sat/integer.h"
#include "ortools/sat/linear_constraint.h"
#include "ortools/sat/linear_programming_constraint.h"
#include "ortools/sat/model.h"
namespace operations_research {
namespace sat {
// Holds the pieces that together form the LP relaxation of a CP-SAT model:
// plain linear constraints, at-most-one constraints over Boolean literals,
// and cut generators that can lazily produce extra cuts during search.
struct LinearRelaxation {
// Ordinary linear constraints lb <= a.x <= ub.
std::vector<LinearConstraint> linear_constraints;
// Each entry is a set of literals of which at most one can be true.
std::vector<std::vector<Literal>> at_most_ones;
// Generators invoked during search to separate violated cuts.
std::vector<CutGenerator> cut_generators;
};
// Looks at all the encoding literals (li <=> var == value_i) that have a
// view and adds a linear relaxation of their relationship with var.
//
// If the encoding is full, we can just add:
// - Sum li == 1
// - var == min_value + Sum li * (value_i - min_value)
//
// When the set of such encoding literals does not cover the full domain of
// var, we do something a bit more involved. Let min_not_encoded/max_not_encoded
// be the min and max value of the domain of var that is NOT part of the
// encoding. We add:
// - Sum li <= 1
// - var >= (Sum li * value_i) + (1 - Sum li) * min_not_encoded
// - var <= (Sum li * value_i) + (1 - Sum li) * max_not_encoded
//
// Note the special case where min_not_encoded == max_not_encoded, which kind
// of reduces to the full encoding, except with a different "rhs" value.
//
// We also increment the corresponding counter if we added something. We
// consider the relaxation "tight" if the encoding was full or if
// min_not_encoded == max_not_encoded.
void AppendRelaxationForEqualityEncoding(IntegerVariable var,
const Model& model,
LinearRelaxation* relaxation,
int* num_tight, int* num_loose);
// This is a different relaxation that uses a partial set of literals li such
// that (li <=> var >= xi). In this case we use the following encoding:
// - li >= l_{i+1} for all possible i. Note that the xi need to be sorted.
// - var >= min + l0 * (x0 - min) + Sum_{i>0} li * (xi - x_{i-1})
// - and the same as above for NegationOf(var) for the upper bound.
//
// Like for AppendRelaxationForEqualityEncoding(), we skip any li that does not
// have an integer view.
void AppendPartialGreaterThanEncodingRelaxation(IntegerVariable var,
const Model& model,
LinearRelaxation* relaxation);
// Returns a vector of new literals in an exactly-one relationship.
// In addition, this creates an IntegerView for all these literals and also
// adds the exactly-one constraint to the LinearRelaxation.
std::vector<Literal> CreateAlternativeLiteralsWithView(
int num_literals, Model* model, LinearRelaxation* relaxation);
// Appends the linear relaxation of a bool_or constraint.
void AppendBoolOrRelaxation(const ConstraintProto& ct, Model* model,
LinearRelaxation* relaxation);
// Appends the linear relaxation of a bool_and constraint.
void AppendBoolAndRelaxation(const ConstraintProto& ct, Model* model,
LinearRelaxation* relaxation);
// Appends the linear relaxation of an at_most_one constraint.
void AppendAtMostOneRelaxation(const ConstraintProto& ct, Model* model,
LinearRelaxation* relaxation);
// Appends the linear relaxation of an exactly_one constraint.
void AppendExactlyOneRelaxation(const ConstraintProto& ct, Model* model,
LinearRelaxation* relaxation);
// Adds a linearization of int max constraints. Returns a vector of z vars such
// that: z_vars[l] == 1 <=> target = exprs[l].
//
// Consider the Lin Max constraint with d expressions and n variables in the
// form: target = max {exprs[l] = Sum (wli * xi + bl)}. l in {1,..,d}.
// Li = lower bound of xi
// Ui = upper bound of xi.
// Let zl be in {0,1} for all l in {1,..,d}.
// The target = exprs[l] when zl = 1.
//
// The following is a valid linearization for Lin Max.
// target >= exprs[l], for all l in {1,..,d}
// target <= Sum_i(wki * xi) + Sum_l((Nkl + bl) * zl), for all k in {1,..,d}
// Where Nkl is a large number defined as:
// Nkl = Sum_i(max((wli - wki)*Li, (wli - wki)*Ui))
// = Sum (max corner difference for variable i, target expr k, max expr l)
// Reference: "Strong mixed-integer programming formulations for trained neural
// networks" by Ross Anderson et al. (https://arxiv.org/pdf/1811.01988.pdf).
// TODO(user): Support linear expression as target.
void AppendLinMaxRelaxationPart1(const ConstraintProto& ct, Model* model,
LinearRelaxation* relaxation);
// Part 2: adds the upper-bound constraints of the linearization described
// above, given the already-created selection literals (one per expression).
void AppendLinMaxRelaxationPart2(
IntegerVariable target, const std::vector<Literal>& alternative_literals,
const std::vector<LinearExpression>& exprs, Model* model,
LinearRelaxation* relaxation);
// Appends the linear relaxation of a max of affine expressions.
// Note: This only works if all affine expressions share the same variable.
void AppendMaxAffineRelaxation(const ConstraintProto& ct, Model* model,
LinearRelaxation* relaxation);
// Appends linear constraints to the relaxation. This also handles the
// relaxation of linear constraints with enforcement literals.
// A linear constraint lb <= ax <= ub with enforcement literals {ei} is relaxed
// as follows.
// lb <= (Sum Negated(ei) * (lb - implied_lb)) + ax <= inf
// -inf <= (Sum Negated(ei) * (ub - implied_ub)) + ax <= ub
// Where implied_lb and implied_ub are trivial lower and upper bounds of the
// constraint.
void AppendLinearConstraintRelaxation(const ConstraintProto& ct,
bool linearize_enforced_constraints,
Model* model,
LinearRelaxation* relaxation);
// Appends the linear relaxation of a circuit constraint.
void AppendCircuitRelaxation(const ConstraintProto& ct, Model* model,
LinearRelaxation* relaxation);
// Appends the linear relaxation of a routes constraint.
void AppendRoutesRelaxation(const ConstraintProto& ct, Model* model,
LinearRelaxation* relaxation);
// Adds a linearization of no_overlap constraints.
// It adds an energetic equation linking the duration of all potential tasks to
// the actual span of the no_overlap constraint.
void AppendNoOverlapRelaxation(const CpModelProto& model_proto,
const ConstraintProto& ct, Model* model,
LinearRelaxation* relaxation);
// Adds a linearization of cumulative constraints. The second part adds an
// energetic equation linking the duration of all potential tasks to the actual
// max span * capacity of the cumulative constraint.
void AppendCumulativeRelaxation(const CpModelProto& model_proto,
const ConstraintProto& ct, Model* model,
LinearRelaxation* relaxation);
// Cut generators. Each of these registers a CutGenerator on the relaxation
// for the corresponding constraint type instead of (or in addition to)
// appending static linear constraints.
//
// Cut generator for int_prod constraints; only added at the given
// linearization level.
void AddIntProdCutGenerator(const ConstraintProto& ct, int linearization_level,
Model* m, LinearRelaxation* relaxation);
// Cut generator for all_different constraints.
void AddAllDiffCutGenerator(const ConstraintProto& ct, Model* m,
LinearRelaxation* relaxation);
// Cut generator for lin_max constraints.
void AddLinMaxCutGenerator(const ConstraintProto& ct, Model* m,
LinearRelaxation* relaxation);
// Cut generator for circuit constraints.
void AddCircuitCutGenerator(const ConstraintProto& ct, Model* m,
LinearRelaxation* relaxation);
// Cut generator for routes constraints.
void AddRoutesCutGenerator(const ConstraintProto& ct, Model* m,
LinearRelaxation* relaxation);
// Cut generator for cumulative constraints.
void AddCumulativeCutGenerator(const ConstraintProto& ct, Model* m,
LinearRelaxation* relaxation);
// Cut generator for no_overlap constraints.
void AddNoOverlapCutGenerator(const ConstraintProto& ct, Model* m,
LinearRelaxation* relaxation);
// Cut generator for no_overlap_2d constraints.
void AddNoOverlap2dCutGenerator(const ConstraintProto& ct, Model* m,
LinearRelaxation* relaxation);
// Adds a linearization of the given constraint, dispatching on its type.
// The amount of linearization done depends on linearization_level.
void TryToLinearizeConstraint(const CpModelProto& model_proto,
const ConstraintProto& ct,
int linearization_level, Model* model,
LinearRelaxation* relaxation);
// Builds the linear relaxation of a CpModelProto.
LinearRelaxation ComputeLinearRelaxation(const CpModelProto& model_proto,
Model* m);
} // namespace sat
} // namespace operations_research
#endif // OR_TOOLS_SAT_LINEAR_RELAXATION_H_