OR-Tools 8.2
bop_fs.cc
// Copyright 2010-2018 Google LLC
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "ortools/bop/bop_fs.h"

#include <cstdint>
#include <limits>
#include <string>
#include <vector>

#include "absl/memory/memory.h"
#include "absl/strings/str_format.h"
#include "google/protobuf/text_format.h"
#include "ortools/base/logging.h"
#include "ortools/base/stl_util.h"
#include "ortools/glop/lp_solver.h"
#include "ortools/sat/boolean_problem.h"
#include "ortools/sat/lp_utils.h"
#include "ortools/sat/sat_solver.h"
#include "ortools/sat/symmetry.h"
#include "ortools/sat/util.h"
#include "ortools/util/bitset.h"

namespace operations_research {
namespace bop {
namespace {

using ::operations_research::glop::ColIndex;
using ::operations_research::glop::DenseRow;
using ::operations_research::glop::GlopParameters;
using ::operations_research::glop::RowIndex;

BopOptimizerBase::Status SolutionStatus(const BopSolution& solution,
                                        int64_t lower_bound) {
  // The lower bound might be greater than the cost of a feasible solution due
  // to rounding errors in the problem scaling and Glop.
  return solution.IsFeasible() ? (solution.GetCost() <= lower_bound
                                      ? BopOptimizerBase::OPTIMAL_SOLUTION_FOUND
                                      : BopOptimizerBase::SOLUTION_FOUND)
                               : BopOptimizerBase::LIMIT_REACHED;
}

bool AllIntegralValues(const DenseRow& values, double tolerance) {
  for (const glop::Fractional value : values) {
    // Note that this test is correct because in this part of the code, Bop
    // only deals with boolean variables.
    if (value >= tolerance && value + tolerance < 1.0) {
      return false;
    }
  }
  return true;
}

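// (Illustration for AllIntegralValues() above, informal: with a tolerance of
// 1e-6, the row {0.0000003, 0.9999998, 1.0} is accepted as integral since
// every entry is within the tolerance of 0 or 1, while {0.5, 1.0} is rejected
// because 0.5 >= tolerance and 0.5 + tolerance < 1.0.)
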
void DenseRowToBopSolution(const DenseRow& values, BopSolution* solution) {
  CHECK(solution != nullptr);
  CHECK_EQ(solution->Size(), values.size());
  for (VariableIndex var(0); var < solution->Size(); ++var) {
    solution->SetValue(var, round(values[ColIndex(var.value())]));
  }
}
}  // anonymous namespace

//------------------------------------------------------------------------------
// GuidedSatFirstSolutionGenerator
//------------------------------------------------------------------------------

GuidedSatFirstSolutionGenerator::GuidedSatFirstSolutionGenerator(
    const std::string& name, Policy policy)
    : BopOptimizerBase(name),
      policy_(policy),
      abort_(false),
      state_update_stamp_(ProblemState::kInitialStampValue),
      sat_solver_() {}

GuidedSatFirstSolutionGenerator::~GuidedSatFirstSolutionGenerator() {}

BopOptimizerBase::Status GuidedSatFirstSolutionGenerator::SynchronizeIfNeeded(
    const ProblemState& problem_state) {
  if (state_update_stamp_ == problem_state.update_stamp()) {
    return BopOptimizerBase::CONTINUE;
  }
  state_update_stamp_ = problem_state.update_stamp();

  // Create the sat_solver if not already done.
  if (!sat_solver_) {
    sat_solver_ = absl::make_unique<sat::SatSolver>();

    // Add in symmetries.
    if (problem_state.GetParameters()
            .exploit_symmetry_in_sat_first_solution()) {
      std::vector<std::unique_ptr<SparsePermutation>> generators;
      FindLinearBooleanProblemSymmetries(problem_state.original_problem(),
                                         &generators);
      std::unique_ptr<sat::SymmetryPropagator> propagator(
          new sat::SymmetryPropagator());
      for (int i = 0; i < generators.size(); ++i) {
        propagator->AddSymmetry(std::move(generators[i]));
      }
      sat_solver_->AddPropagator(propagator.get());
      sat_solver_->TakePropagatorOwnership(std::move(propagator));
    }
  }

  const BopOptimizerBase::Status load_status =
      LoadStateProblemToSatSolver(problem_state, sat_solver_.get());
  if (load_status != BopOptimizerBase::CONTINUE) return load_status;

  switch (policy_) {
    case Policy::kNotGuided:
      break;
    case Policy::kLpGuided:
      for (ColIndex col(0); col < problem_state.lp_values().size(); ++col) {
        const double value = problem_state.lp_values()[col];
        sat_solver_->SetAssignmentPreference(
            sat::Literal(sat::BooleanVariable(col.value()), round(value) == 1),
            1 - fabs(value - round(value)));
      }
      break;
    case Policy::kObjectiveGuided:
      UseObjectiveForSatAssignmentPreference(problem_state.original_problem(),
                                             sat_solver_.get());
      break;
    case Policy::kUserGuided:
      for (int i = 0; i < problem_state.assignment_preference().size(); ++i) {
        sat_solver_->SetAssignmentPreference(
            sat::Literal(sat::BooleanVariable(i),
                         problem_state.assignment_preference()[i]),
            1.0);
      }
      break;
  }
  return BopOptimizerBase::CONTINUE;
}

bool GuidedSatFirstSolutionGenerator::ShouldBeRun(
    const ProblemState& problem_state) const {
  if (abort_) return false;
  if (policy_ == Policy::kLpGuided && problem_state.lp_values().empty()) {
    return false;
  }
  if (policy_ == Policy::kUserGuided &&
      problem_state.assignment_preference().empty()) {
    return false;
  }
  return true;
}

BopOptimizerBase::Status GuidedSatFirstSolutionGenerator::Optimize(
    const BopParameters& parameters, const ProblemState& problem_state,
    LearnedInfo* learned_info, TimeLimit* time_limit) {
  CHECK(learned_info != nullptr);
  CHECK(time_limit != nullptr);
  learned_info->Clear();

  const BopOptimizerBase::Status sync_status =
      SynchronizeIfNeeded(problem_state);
  if (sync_status != BopOptimizerBase::CONTINUE) return sync_status;

  sat::SatParameters sat_params;
  sat_params.set_max_time_in_seconds(time_limit->GetTimeLeft());
  sat_params.set_max_deterministic_time(time_limit->GetDeterministicTimeLeft());
  sat_params.set_random_seed(parameters.random_seed());

  // We use a relatively small conflict limit so that other optimizers get a
  // chance to run if this one is slow. Note that if this limit is reached, we
  // will return BopOptimizerBase::CONTINUE so that Optimize() will be called
  // again later to resume the current work.
  sat_params.set_max_number_of_conflicts(
      parameters.guided_sat_conflicts_chunk());
  sat_solver_->SetParameters(sat_params);

  const double initial_deterministic_time = sat_solver_->deterministic_time();
  const sat::SatSolver::Status sat_status = sat_solver_->Solve();
  time_limit->AdvanceDeterministicTime(sat_solver_->deterministic_time() -
                                       initial_deterministic_time);

  if (sat_status == sat::SatSolver::INFEASIBLE) {
    if (policy_ != Policy::kNotGuided) abort_ = true;
    if (problem_state.upper_bound() != std::numeric_limits<int64_t>::max()) {
      // As the solution in the state problem is feasible, it is proved optimal.
      learned_info->lower_bound = problem_state.upper_bound();
      return BopOptimizerBase::OPTIMAL_SOLUTION_FOUND;
    }
    // The problem is proved infeasible.
    return BopOptimizerBase::INFEASIBLE;
  }

  ExtractLearnedInfoFromSatSolver(sat_solver_.get(), learned_info);
  if (sat_status == sat::SatSolver::FEASIBLE) {
    SatAssignmentToBopSolution(sat_solver_->Assignment(),
                               &learned_info->solution);
    return SolutionStatus(learned_info->solution, problem_state.lower_bound());
  }

  return BopOptimizerBase::CONTINUE;
}

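// Illustrative usage sketch for the chunked solve in
// GuidedSatFirstSolutionGenerator::Optimize() above (a hypothetical driver
// loop, not the actual portfolio code): since each call only runs
// guided_sat_conflicts_chunk() conflicts and returns CONTINUE when that limit
// is reached, a caller is expected to invoke Optimize() repeatedly, e.g.
//
//   GuidedSatFirstSolutionGenerator generator(
//       "guided_sat", GuidedSatFirstSolutionGenerator::Policy::kLpGuided);
//   while (!time_limit->LimitReached() && generator.ShouldBeRun(state) &&
//          generator.Optimize(params, state, &info, time_limit) ==
//              BopOptimizerBase::CONTINUE) {
//     // Merge `info` back into `state` between chunks.
//   }
//
// where `params`, `state`, `info` and `time_limit` are assumed to be set up
// by the surrounding solver.
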
//------------------------------------------------------------------------------
// BopRandomFirstSolutionGenerator
//------------------------------------------------------------------------------
BopRandomFirstSolutionGenerator::BopRandomFirstSolutionGenerator(
    const std::string& name, const BopParameters& parameters,
    sat::SatSolver* sat_propagator, MTRandom* random)
    : BopOptimizerBase(name),
      random_(random),
      sat_propagator_(sat_propagator) {}

BopRandomFirstSolutionGenerator::~BopRandomFirstSolutionGenerator() {}

// Only run the RandomFirstSolution when there is an objective to minimize.
bool BopRandomFirstSolutionGenerator::ShouldBeRun(
    const ProblemState& problem_state) const {
  return problem_state.original_problem().objective().literals_size() > 0;
}

BopOptimizerBase::Status BopRandomFirstSolutionGenerator::Optimize(
    const BopParameters& parameters, const ProblemState& problem_state,
    LearnedInfo* learned_info, TimeLimit* time_limit) {
  CHECK(learned_info != nullptr);
  CHECK(time_limit != nullptr);
  learned_info->Clear();

  // Save the current solver heuristics.
  const sat::SatParameters saved_params = sat_propagator_->parameters();
  const std::vector<std::pair<sat::Literal, double>> saved_prefs =
      sat_propagator_->AllPreferences();

  const int kMaxNumConflicts = 10;
  int64_t best_cost = problem_state.solution().IsFeasible()
                          ? problem_state.solution().GetCost()
                          : std::numeric_limits<int64_t>::max();
  int64_t remaining_num_conflicts =
      parameters.max_number_of_conflicts_in_random_solution_generation();
  int64_t old_num_failures = 0;

  // Optimization: Since each Solve() is really fast, we want to limit as
  // much as possible the work around one.
  bool objective_need_to_be_overconstrained =
      (best_cost != std::numeric_limits<int64_t>::max());

  bool solution_found = false;
  while (remaining_num_conflicts > 0 && !time_limit->LimitReached()) {
    sat_propagator_->Backtrack(0);
    old_num_failures = sat_propagator_->num_failures();

    sat::SatParameters sat_params = saved_params;
    sat::RandomizeDecisionHeuristic(random_, &sat_params);
    sat_params.set_max_number_of_conflicts(kMaxNumConflicts);
    sat_propagator_->SetParameters(sat_params);
    sat_propagator_->ResetDecisionHeuristic();

    if (objective_need_to_be_overconstrained) {
      if (!AddObjectiveConstraint(
              problem_state.original_problem(), false, sat::Coefficient(0),
              true, sat::Coefficient(best_cost) - 1, sat_propagator_)) {
        // The solution is proved optimal (if any).
        learned_info->lower_bound = best_cost;
        return best_cost == std::numeric_limits<int64_t>::max()
                   ? BopOptimizerBase::INFEASIBLE
                   : BopOptimizerBase::OPTIMAL_SOLUTION_FOUND;
      }
      objective_need_to_be_overconstrained = false;
    }

    // Special assignment preference parameters.
    const int preference = random_->Uniform(4);
    if (preference == 0) {
      UseObjectiveForSatAssignmentPreference(problem_state.original_problem(),
                                             sat_propagator_);
    } else if (preference == 1 && !problem_state.lp_values().empty()) {
      // Assign SAT assignment preference based on the LP solution.
      for (ColIndex col(0); col < problem_state.lp_values().size(); ++col) {
        const double value = problem_state.lp_values()[col];
        sat_propagator_->SetAssignmentPreference(
            sat::Literal(sat::BooleanVariable(col.value()), round(value) == 1),
            1 - fabs(value - round(value)));
      }
    }

    const sat::SatSolver::Status sat_status =
        sat_propagator_->SolveWithTimeLimit(time_limit);
    if (sat_status == sat::SatSolver::FEASIBLE) {
      objective_need_to_be_overconstrained = true;
      solution_found = true;
      SatAssignmentToBopSolution(sat_propagator_->Assignment(),
                                 &learned_info->solution);
      CHECK_LT(learned_info->solution.GetCost(), best_cost);
      best_cost = learned_info->solution.GetCost();
    } else if (sat_status == sat::SatSolver::INFEASIBLE) {
      // The solution is proved optimal (if any).
      learned_info->lower_bound = best_cost;
      return best_cost == std::numeric_limits<int64_t>::max()
                 ? BopOptimizerBase::INFEASIBLE
                 : BopOptimizerBase::OPTIMAL_SOLUTION_FOUND;
    }

    // The number of failures is a good approximation of the number of
    // conflicts. Note that the number of failures of the SAT solver is not
    // reinitialized.
    remaining_num_conflicts -=
        sat_propagator_->num_failures() - old_num_failures;
  }

  // Restore sat_propagator_ to its original state.
  // Note that if the function is aborted before that, it means we solved the
  // problem to optimality (or proved it to be infeasible), so we don't need
  // to do any extra work in these cases since the sat_propagator_ will not be
  // used anymore.
  CHECK_EQ(0, sat_propagator_->AssumptionLevel());
  sat_propagator_->RestoreSolverToAssumptionLevel();
  sat_propagator_->SetParameters(saved_params);
  sat_propagator_->ResetDecisionHeuristicAndSetAllPreferences(saved_prefs);

  // Unsatisfiability can be proved during the call to
  // RestoreSolverToAssumptionLevel().
  if (sat_propagator_->IsModelUnsat()) {
    // The solution is proved optimal (if any).
    learned_info->lower_bound = best_cost;
    return best_cost == std::numeric_limits<int64_t>::max()
               ? BopOptimizerBase::INFEASIBLE
               : BopOptimizerBase::OPTIMAL_SOLUTION_FOUND;
  }

  ExtractLearnedInfoFromSatSolver(sat_propagator_, learned_info);

  return solution_found ? BopOptimizerBase::SOLUTION_FOUND
                        : BopOptimizerBase::CONTINUE;
}

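// A note on the over-constraining step in
// BopRandomFirstSolutionGenerator::Optimize() above (informal): once a
// solution of cost best_cost is known, the call
//
//   AddObjectiveConstraint(problem, /*use_lower_bound=*/false,
//                          sat::Coefficient(0), /*use_upper_bound=*/true,
//                          sat::Coefficient(best_cost) - 1, sat_propagator_)
//
// restricts the SAT search to assignments of cost <= best_cost - 1. For an
// objective like 3*x1 + 2*x2 with best_cost == 4, only assignments with
// 3*x1 + 2*x2 <= 3 remain, so every new feasible solution strictly improves
// on the previous one (hence the CHECK_LT), and an INFEASIBLE answer proves
// the optimality of the last solution found.
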
//------------------------------------------------------------------------------
// LinearRelaxation
//------------------------------------------------------------------------------
LinearRelaxation::LinearRelaxation(const BopParameters& parameters,
                                   const std::string& name)
    : BopOptimizerBase(name),
      parameters_(parameters),
      state_update_stamp_(ProblemState::kInitialStampValue),
      lp_model_loaded_(false),
      num_full_solves_(0),
      lp_model_(),
      lp_solver_(),
      scaling_(1),
      offset_(0),
      num_fixed_variables_(-1),
      problem_already_solved_(false),
      scaled_solution_cost_(glop::kInfinity) {}

LinearRelaxation::~LinearRelaxation() {}

BopOptimizerBase::Status LinearRelaxation::SynchronizeIfNeeded(
    const ProblemState& problem_state) {
  if (state_update_stamp_ == problem_state.update_stamp()) {
    return BopOptimizerBase::ABORT;
  }
  state_update_stamp_ = problem_state.update_stamp();

  // If this is a pure feasibility problem, obey
  // `BopParameters.max_lp_solve_for_feasibility_problems`.
  if (problem_state.original_problem().objective().literals_size() == 0 &&
      parameters_.max_lp_solve_for_feasibility_problems() >= 0 &&
      num_full_solves_ >= parameters_.max_lp_solve_for_feasibility_problems()) {
    return BopOptimizerBase::ABORT;
  }

  // Check if the number of fixed variables is greater than last time.
  // TODO(user): Consider checking changes in number of conflicts too.
  int num_fixed_variables = 0;
  for (const bool is_fixed : problem_state.is_fixed()) {
    if (is_fixed) {
      ++num_fixed_variables;
    }
  }
  problem_already_solved_ =
      problem_already_solved_ && num_fixed_variables_ >= num_fixed_variables;
  if (problem_already_solved_) return BopOptimizerBase::ABORT;

  // Create the LP model based on the current problem state.
  num_fixed_variables_ = num_fixed_variables;
  if (!lp_model_loaded_) {
    lp_model_.Clear();
    ConvertBooleanProblemToLinearProgram(problem_state.original_problem(),
                                         &lp_model_);
    lp_model_loaded_ = true;
  }
  for (VariableIndex var(0); var < problem_state.is_fixed().size(); ++var) {
    if (problem_state.IsVariableFixed(var)) {
      const glop::Fractional value =
          problem_state.GetVariableFixedValue(var) ? 1.0 : 0.0;
      lp_model_.SetVariableBounds(ColIndex(var.value()), value, value);
    }
  }

  // Add learned binary clauses.
  if (parameters_.use_learned_binary_clauses_in_lp()) {
    for (const sat::BinaryClause& clause :
         problem_state.NewlyAddedBinaryClauses()) {
      const RowIndex constraint_index = lp_model_.CreateNewConstraint();
      const int64_t coefficient_a = clause.a.IsPositive() ? 1 : -1;
      const int64_t coefficient_b = clause.b.IsPositive() ? 1 : -1;
      const int64_t rhs = 1 + (clause.a.IsPositive() ? 0 : -1) +
                          (clause.b.IsPositive() ? 0 : -1);
      const ColIndex col_a(clause.a.Variable().value());
      const ColIndex col_b(clause.b.Variable().value());
      const std::string name_a = lp_model_.GetVariableName(col_a);
      const std::string name_b = lp_model_.GetVariableName(col_b);

      lp_model_.SetConstraintName(
          constraint_index,
          (clause.a.IsPositive() ? name_a : "not(" + name_a + ")") + " or " +
              (clause.b.IsPositive() ? name_b : "not(" + name_b + ")"));
      lp_model_.SetCoefficient(constraint_index, col_a, coefficient_a);
      lp_model_.SetCoefficient(constraint_index, col_b, coefficient_b);
      lp_model_.SetConstraintBounds(constraint_index, rhs, glop::kInfinity);
    }
  }

  scaling_ = problem_state.original_problem().objective().scaling_factor();
  offset_ = problem_state.original_problem().objective().offset();
  scaled_solution_cost_ =
      problem_state.solution().IsFeasible()
          ? problem_state.solution().GetScaledCost()
          : (lp_model_.IsMaximizationProblem() ? -glop::kInfinity
                                               : glop::kInfinity);
  return BopOptimizerBase::CONTINUE;
}

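// As an illustration of the binary-clause translation in
// LinearRelaxation::SynchronizeIfNeeded() above (informal): a learned clause
// (a or not(b)) gets coefficient_a = 1, coefficient_b = -1 and
// rhs = 1 + 0 + (-1) = 0, i.e. the constraint x_a - x_b >= 0, which is the LP
// relaxation of "b implies a". Two positive literals give x_a + x_b >= 1, and
// two negated literals give -x_a - x_b >= -1, i.e. x_a + x_b <= 1.
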
// Always let the LP solver run if there is an objective. If there isn't, only
// let the LP solver run if the user asked for it by setting
// `BopParameters.max_lp_solve_for_feasibility_problems` to a non-zero value
// (a negative value means no limit).
// TODO(user): also deal with problem_already_solved_
bool LinearRelaxation::ShouldBeRun(const ProblemState& problem_state) const {
  return problem_state.original_problem().objective().literals_size() > 0 ||
         parameters_.max_lp_solve_for_feasibility_problems() != 0;
}

BopOptimizerBase::Status LinearRelaxation::Optimize(
    const BopParameters& parameters, const ProblemState& problem_state,
    LearnedInfo* learned_info, TimeLimit* time_limit) {
  CHECK(learned_info != nullptr);
  CHECK(time_limit != nullptr);
  learned_info->Clear();

  const BopOptimizerBase::Status sync_status =
      SynchronizeIfNeeded(problem_state);
  if (sync_status != BopOptimizerBase::CONTINUE) {
    return sync_status;
  }

  const glop::ProblemStatus lp_status = Solve(false, time_limit);
  VLOG(1) << " LP: "
          << absl::StrFormat("%.6f", lp_solver_.GetObjectiveValue())
          << " status: " << GetProblemStatusString(lp_status);

  if (lp_status == glop::ProblemStatus::OPTIMAL ||
      lp_status == glop::ProblemStatus::IMPRECISE) {
    ++num_full_solves_;
    problem_already_solved_ = true;
  }

  if (lp_status == glop::ProblemStatus::INIT) {
    return BopOptimizerBase::LIMIT_REACHED;
  }
  if (lp_status != glop::ProblemStatus::OPTIMAL &&
      lp_status != glop::ProblemStatus::IMPRECISE &&
      lp_status != glop::ProblemStatus::PRIMAL_FEASIBLE) {
    return BopOptimizerBase::ABORT;
  }
  learned_info->lp_values = lp_solver_.variable_values();

  if (lp_status == glop::ProblemStatus::OPTIMAL) {
    // The lp returns the objective with the offset and scaled, so we need to
    // unscale it and then remove the offset.
    double lower_bound = lp_solver_.GetObjectiveValue();
    if (parameters_.use_lp_strong_branching()) {
      lower_bound =
          ComputeLowerBoundUsingStrongBranching(learned_info, time_limit);
      VLOG(1) << " LP: "
              << absl::StrFormat("%.6f", lower_bound)
              << " using strong branching.";
    }

    const int tolerance_sign = scaling_ < 0 ? 1 : -1;
    const double unscaled_cost =
        (lower_bound +
         tolerance_sign *
             lp_solver_.GetParameters().solution_feasibility_tolerance()) /
            scaling_ -
        offset_;
    learned_info->lower_bound = static_cast<int64_t>(ceil(unscaled_cost));

    if (AllIntegralValues(
            learned_info->lp_values,
            lp_solver_.GetParameters().primal_feasibility_tolerance())) {
      DenseRowToBopSolution(learned_info->lp_values, &learned_info->solution);
      CHECK(learned_info->solution.IsFeasible());
      return BopOptimizerBase::OPTIMAL_SOLUTION_FOUND;
    }
  }

  return BopOptimizerBase::INFORMATION_FOUND;
}

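// A worked example of the lower-bound rounding in LinearRelaxation::Optimize()
// above (informal), assuming scaling_ == 1, offset_ == 0 and a
// solution_feasibility_tolerance of 1e-6: an LP objective of 2.3 gives
// unscaled_cost ~= 2.299999 and a learned lower bound of ceil(2.299999) == 3,
// while an LP objective of 3.0000004 (an integral value perturbed by LP
// imprecision) gives unscaled_cost ~= 2.9999994 and a bound of 3 rather
// than 4. The tolerance_sign flips when scaling_ is negative so that the
// correction always relaxes the bound.
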
// TODO(user): It is possible to stop the search earlier using the glop
// parameters objective_lower_limit / objective_upper_limit. That can be used
// when a feasible solution is known, or when the best bound of the "false"
// branch is computed during strong branching.
glop::ProblemStatus LinearRelaxation::Solve(bool incremental_solve,
                                            TimeLimit* time_limit) {
  GlopParameters glop_params;
  if (incremental_solve) {
    glop_params.set_use_dual_simplex(true);
    glop_params.set_allow_simplex_algorithm_change(true);
    glop_params.set_use_preprocessing(false);
    lp_solver_.SetParameters(glop_params);
  }
  NestedTimeLimit nested_time_limit(time_limit, time_limit->GetTimeLeft(),
                                    parameters_.lp_max_deterministic_time());
  const glop::ProblemStatus lp_status = lp_solver_.SolveWithTimeLimit(
      lp_model_, nested_time_limit.GetTimeLimit());
  return lp_status;
}

double LinearRelaxation::ComputeLowerBoundUsingStrongBranching(
    LearnedInfo* learned_info, TimeLimit* time_limit) {
  const glop::DenseRow initial_lp_values = lp_solver_.variable_values();
  const double tolerance =
      lp_solver_.GetParameters().primal_feasibility_tolerance();
  double best_lp_objective = lp_solver_.GetObjectiveValue();
  for (glop::ColIndex col(0); col < initial_lp_values.size(); ++col) {
    // TODO(user): Order the variables by some meaningful quantity (probably
    // the cost variation when we snap it to one of its bounds) so
    // we can try the one that seems the most promising first.
    // That way we can stop the strong branching earlier.
    if (time_limit->LimitReached()) break;

    // Skip fixed variables.
    if (lp_model_.variable_lower_bounds()[col] ==
        lp_model_.variable_upper_bounds()[col]) {
      continue;
    }
    CHECK_EQ(0.0, lp_model_.variable_lower_bounds()[col]);
    CHECK_EQ(1.0, lp_model_.variable_upper_bounds()[col]);

    // Note(user): Experiments show that iterating on all variables can be
    // costly and doesn't lead to better solutions when a SAT optimizer is used
    // afterward, e.g. BopSatLpFirstSolutionGenerator, and no feasible
    // solutions are available.
    // No variables are skipped when a feasible solution is known, as the best
    // bound / cost comparison can be used to deduce fixed variables, which is
    // useful for other optimizers.
    if ((scaled_solution_cost_ == glop::kInfinity ||
         scaled_solution_cost_ == -glop::kInfinity) &&
        (initial_lp_values[col] < tolerance ||
         initial_lp_values[col] + tolerance > 1)) {
      continue;
    }

    double objective_true = best_lp_objective;
    double objective_false = best_lp_objective;

    // Set to true.
    lp_model_.SetVariableBounds(col, 1.0, 1.0);
    const glop::ProblemStatus status_true = Solve(true, time_limit);
    // TODO(user): Deal with PRIMAL_INFEASIBLE, DUAL_INFEASIBLE and
    // INFEASIBLE_OR_UNBOUNDED statuses. In all cases, if the
    // original lp was feasible, this means that the variable can
    // be fixed to the other bound.
    if (status_true == glop::ProblemStatus::OPTIMAL ||
        status_true == glop::ProblemStatus::DUAL_FEASIBLE) {
      objective_true = lp_solver_.GetObjectiveValue();

      // Set to false.
      lp_model_.SetVariableBounds(col, 0.0, 0.0);
      const glop::ProblemStatus status_false = Solve(true, time_limit);
      if (status_false == glop::ProblemStatus::OPTIMAL ||
          status_false == glop::ProblemStatus::DUAL_FEASIBLE) {
        objective_false = lp_solver_.GetObjectiveValue();

        // Compute the new min.
        best_lp_objective =
            lp_model_.IsMaximizationProblem()
                ? std::min(best_lp_objective,
                           std::max(objective_true, objective_false))
                : std::max(best_lp_objective,
                           std::min(objective_true, objective_false));
      }
    }

    if (CostIsWorseThanSolution(objective_true, tolerance)) {
      // Having variable col set to true can't possibly lead to a better
      // solution than the current one. Set the variable to false.
      lp_model_.SetVariableBounds(col, 0.0, 0.0);
      learned_info->fixed_literals.push_back(
          sat::Literal(sat::BooleanVariable(col.value()), false));
    } else if (CostIsWorseThanSolution(objective_false, tolerance)) {
      // Having variable col set to false can't possibly lead to a better
      // solution than the current one. Set the variable to true.
      lp_model_.SetVariableBounds(col, 1.0, 1.0);
      learned_info->fixed_literals.push_back(
          sat::Literal(sat::BooleanVariable(col.value()), true));
    } else {
      // Unset. It is safe to use 0.0 and 1.0 as the variable is not fixed.
      lp_model_.SetVariableBounds(col, 0.0, 1.0);
    }
  }
  return best_lp_objective;
}

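// A short recap of ComputeLowerBoundUsingStrongBranching() above (informal),
// for a minimization problem: every feasible assignment sets each free
// variable to either 0 or 1, so min(objective_true, objective_false) is a
// valid lower bound for that variable, and best_lp_objective takes the max of
// these per-variable bounds (it can only improve on the plain LP bound). When
// one branch is already worse than the incumbent solution, the variable is
// fixed to the other value and reported in learned_info->fixed_literals.
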
bool LinearRelaxation::CostIsWorseThanSolution(double scaled_cost,
                                               double tolerance) const {
  return lp_model_.IsMaximizationProblem()
             ? scaled_cost + tolerance < scaled_solution_cost_
             : scaled_cost > scaled_solution_cost_ + tolerance;
}

}  // namespace bop
}  // namespace operations_research