OR-Tools 8.2
preprocessor.cc
1 // Copyright 2010-2018 Google LLC
2 // Licensed under the Apache License, Version 2.0 (the "License");
3 // you may not use this file except in compliance with the License.
4 // You may obtain a copy of the License at
5 //
6 // http://www.apache.org/licenses/LICENSE-2.0
7 //
8 // Unless required by applicable law or agreed to in writing, software
9 // distributed under the License is distributed on an "AS IS" BASIS,
10 // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 // See the License for the specific language governing permissions and
12 // limitations under the License.
13 
15 
16 #include <limits>
17 
18 #include "absl/strings/str_format.h"
21 #include "ortools/glop/status.h"
26 
27 namespace operations_research {
28 namespace glop {
29 
31 
32 namespace {
33 // Returns an interval as a human-readable string for debugging.
34 std::string IntervalString(Fractional lb, Fractional ub) {
35  return absl::StrFormat("[%g, %g]", lb, ub);
36 }
37 
38 #if defined(_MSC_VER)
39 double trunc(double d) { return d > 0 ? floor(d) : ceil(d); }
40 #endif
41 } // namespace
42 
43 // --------------------------------------------------------
44 // Preprocessor
45 // --------------------------------------------------------
46 Preprocessor::Preprocessor(const GlopParameters* parameters)
47  : status_(ProblemStatus::INIT),
48  parameters_(*parameters),
49  in_mip_context_(false),
50  infinite_time_limit_(TimeLimit::Infinite()),
51  time_limit_(infinite_time_limit_.get()) {}
52 Preprocessor::~Preprocessor() {}
53 
54 // --------------------------------------------------------
55 // MainLpPreprocessor
56 // --------------------------------------------------------
57 
58 #define RUN_PREPROCESSOR(name) \
59  RunAndPushIfRelevant(std::unique_ptr<Preprocessor>(new name(&parameters_)), \
60  #name, time_limit_, lp)
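// Illustrative sketch (not part of the original file): a call such as
//   RUN_PREPROCESSOR(EmptyColumnPreprocessor);
// would expand to
//   RunAndPushIfRelevant(
//       std::unique_ptr<Preprocessor>(new EmptyColumnPreprocessor(&parameters_)),
//       "EmptyColumnPreprocessor", time_limit_, lp);
// so each invocation both runs the preprocessor and, if it did something,
// pushes it on the stack used later for postsolve.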
61 
62 bool MainLpPreprocessor::Run(LinearProgram* lp) {
63  RETURN_VALUE_IF_NULL(lp, false);
64  initial_num_rows_ = lp->num_constraints();
65  initial_num_cols_ = lp->num_variables();
66  initial_num_entries_ = lp->num_entries();
67  if (parameters_.use_preprocessing()) {
69 
70  // We run the preprocessors a few times because running one of them may allow
71  // another one to remove additional rows or columns.
72  const int kMaxNumPasses = 20;
73  for (int i = 0; i < kMaxNumPasses; ++i) {
74  const int old_stack_size = preprocessors_.size();
83 
84  // Abort early if none of the preprocessors did something. Technically
85  // this is true if none of the preprocessors above needs postsolving,
86  // which has exactly the same meaning for these particular preprocessors.
87  if (preprocessors_.size() == old_stack_size) {
88  // We use i here because the last pass did nothing.
89  if (parameters_.log_search_progress() || VLOG_IS_ON(1)) {
90  LOG(INFO) << "Reached fixed point after presolve pass #" << i;
91  }
92  break;
93  }
94  }
97 
98  // TODO(user): Run them in the loop above if the effect on the running time
99  // is good. This needs more investigation.
102 
103  // If DualizerPreprocessor was run, we need to do some extra preprocessing.
104  // This is because it currently adds a lot of zero-cost singleton columns.
105  const int old_stack_size = preprocessors_.size();
106 
107  // TODO(user): We probably want to scale the costs before and after this
108  // preprocessor so that the rhs/objective of the dual have a reasonable
109  // magnitude.
111  if (old_stack_size != preprocessors_.size()) {
117  }
118 
120  }
121 
122  // The scaling is controlled by use_scaling, not use_preprocessing.
124 
125  // This one must always run. It is needed by the revised simplex code.
127  return !preprocessors_.empty();
128 }
129 
130 #undef RUN_PREPROCESSOR
131 
132 void MainLpPreprocessor::RunAndPushIfRelevant(
133  std::unique_ptr<Preprocessor> preprocessor, const std::string& name,
135  RETURN_IF_NULL(preprocessor);
137  if (status_ != ProblemStatus::INIT || time_limit->LimitReached()) return;
138 
139  const double start_time = time_limit->GetElapsedTime();
140  preprocessor->SetTimeLimit(time_limit);
141 
142  // No need to run the preprocessor if the lp is empty.
143  // TODO(user): without this test, the code fails as of 2013-03-18.
144  if (lp->num_variables() == 0 && lp->num_constraints() == 0) {
146  return;
147  }
148 
149  const bool log_info = parameters_.log_search_progress() || VLOG_IS_ON(1);
150  if (preprocessor->Run(lp)) {
151  const EntryIndex new_num_entries = lp->num_entries();
152  const double preprocess_time = time_limit->GetElapsedTime() - start_time;
153  if (log_info) {
154  LOG(INFO) << absl::StrFormat(
155  "%s(%fs): %d(%d) rows, %d(%d) columns, %d(%d) entries.", name,
156  preprocess_time, lp->num_constraints().value(),
157  (lp->num_constraints() - initial_num_rows_).value(),
158  lp->num_variables().value(),
159  (lp->num_variables() - initial_num_cols_).value(),
160  // static_cast<int64> is needed because the Android port uses int32.
161  static_cast<int64>(new_num_entries.value()),
162  static_cast<int64>(new_num_entries.value() -
163  initial_num_entries_.value()));
164  }
165  status_ = preprocessor->status();
166  preprocessors_.push_back(std::move(preprocessor));
167  return;
168  } else {
169  // Even if a preprocessor returns false (i.e. no need for postsolve), it
170  // can detect an issue with the problem.
171  status_ = preprocessor->status();
172  if (status_ != ProblemStatus::INIT && log_info) {
173  LOG(INFO) << name << " detected that the problem is "
175  }
176  }
177 }
178 
181  while (!preprocessors_.empty()) {
182  preprocessors_.back()->RecoverSolution(solution);
183  preprocessors_.pop_back();
184  }
185 }
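// Illustrative sketch (not part of the original file): the loop above undoes
// the presolve transformations in reverse (LIFO) order. If the stack holds
// {A, B, C} after Run(), the calls are
//   C->RecoverSolution(solution);
//   B->RecoverSolution(solution);
//   A->RecoverSolution(solution);
// each one restoring the rows/columns it removed before the earlier-pushed
// preprocessors run.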
186 
187 // --------------------------------------------------------
188 // ColumnDeletionHelper
189 // --------------------------------------------------------
190 
191 void ColumnDeletionHelper::Clear() {
192  is_column_deleted_.clear();
193  stored_value_.clear();
194 }
195 
198 }
199 
200 void ColumnDeletionHelper::MarkColumnForDeletionWithState(
201  ColIndex col, Fractional fixed_value, VariableStatus status) {
202  DCHECK_GE(col, 0);
203  if (col >= is_column_deleted_.size()) {
204  is_column_deleted_.resize(col + 1, false);
205  stored_value_.resize(col + 1, 0.0);
206  stored_status_.resize(col + 1, VariableStatus::FREE);
207  }
208  is_column_deleted_[col] = true;
209  stored_value_[col] = fixed_value;
210  stored_status_[col] = status;
211 }
212 
213 void ColumnDeletionHelper::RestoreDeletedColumns(
214  ProblemSolution* solution) const {
215  DenseRow new_primal_values;
216  VariableStatusRow new_variable_statuses;
217  ColIndex old_index(0);
218  for (ColIndex col(0); col < is_column_deleted_.size(); ++col) {
219  if (is_column_deleted_[col]) {
220  new_primal_values.push_back(stored_value_[col]);
221  new_variable_statuses.push_back(stored_status_[col]);
222  } else {
223  new_primal_values.push_back(solution->primal_values[old_index]);
224  new_variable_statuses.push_back(solution->variable_statuses[old_index]);
225  ++old_index;
226  }
227  }
228 
229  // Copy the end of the vectors and swap them with the ones in solution.
230  const ColIndex num_cols = solution->primal_values.size();
231  DCHECK_EQ(num_cols, solution->variable_statuses.size());
232  for (; old_index < num_cols; ++old_index) {
233  new_primal_values.push_back(solution->primal_values[old_index]);
234  new_variable_statuses.push_back(solution->variable_statuses[old_index]);
235  }
236  new_primal_values.swap(solution->primal_values);
237  new_variable_statuses.swap(solution->variable_statuses);
238 }
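// Illustrative sketch (not part of the original file): if a 3-column problem
// had columns 0 and 2 marked for deletion, is_column_deleted_ is
// {true, false, true} and the presolved solution contains a single column.
// The loop above rebuilds vectors of size 3: positions 0 and 2 receive the
// stored value/status and position 1 receives the solved value/status of the
// remaining column.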
239 
240 // --------------------------------------------------------
241 // RowDeletionHelper
242 // --------------------------------------------------------
243 
244 void RowDeletionHelper::Clear() { is_row_deleted_.clear(); }
245 
246 void RowDeletionHelper::MarkRowForDeletion(RowIndex row) {
247  DCHECK_GE(row, 0);
248  if (row >= is_row_deleted_.size()) {
249  is_row_deleted_.resize(row + 1, false);
250  }
251  is_row_deleted_[row] = true;
252 }
253 
254 void RowDeletionHelper::UnmarkRow(RowIndex row) {
255  if (row >= is_row_deleted_.size()) return;
256  is_row_deleted_[row] = false;
257 }
258 
259 const DenseBooleanColumn& RowDeletionHelper::GetMarkedRows() const {
260  return is_row_deleted_;
261 }
262 
263 void RowDeletionHelper::RestoreDeletedRows(ProblemSolution* solution) const {
264  DenseColumn new_dual_values;
265  ConstraintStatusColumn new_constraint_statuses;
266  RowIndex old_index(0);
267  const RowIndex end = is_row_deleted_.size();
268  for (RowIndex row(0); row < end; ++row) {
269  if (is_row_deleted_[row]) {
270  new_dual_values.push_back(0.0);
271  new_constraint_statuses.push_back(ConstraintStatus::BASIC);
272  } else {
273  new_dual_values.push_back(solution->dual_values[old_index]);
274  new_constraint_statuses.push_back(
275  solution->constraint_statuses[old_index]);
276  ++old_index;
277  }
278  }
279 
280  // Copy the end of the vectors and swap them with the ones in solution.
281  const RowIndex num_rows = solution->dual_values.size();
282  DCHECK_EQ(num_rows, solution->constraint_statuses.size());
283  for (; old_index < num_rows; ++old_index) {
284  new_dual_values.push_back(solution->dual_values[old_index]);
285  new_constraint_statuses.push_back(solution->constraint_statuses[old_index]);
286  }
287  new_dual_values.swap(solution->dual_values);
288  new_constraint_statuses.swap(solution->constraint_statuses);
289 }
290 
291 // --------------------------------------------------------
292 // EmptyColumnPreprocessor
293 // --------------------------------------------------------
294 
295 namespace {
296 
297 // Computes the status of a variable given its value and bounds. This only works
298 // with a value exactly at one of the bounds, or a value of 0.0 for free
299 // variables.
300 VariableStatus ComputeVariableStatus(Fractional value, Fractional lower_bound,
301  Fractional upper_bound) {
302  if (lower_bound == upper_bound) {
303  DCHECK_EQ(value, lower_bound);
304  DCHECK(IsFinite(lower_bound));
305  return VariableStatus::FIXED_VALUE;
306  }
307  if (value == lower_bound) {
308  DCHECK_NE(lower_bound, -kInfinity);
309  return VariableStatus::AT_LOWER_BOUND;
310  }
311  if (value == upper_bound) {
312  DCHECK_NE(upper_bound, kInfinity);
313  return VariableStatus::AT_UPPER_BOUND;
314  }
315 
316  // TODO(user): restrict this to unbounded variables with a value of zero.
317  // We can't do that when postsolving an infeasible problem. Should postsolve
318  // not be called on an infeasible problem?
319  return VariableStatus::FREE;
320 }
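// Illustrative sketch (not part of the original file), assuming the standard
// glop VariableStatus values used elsewhere in this file: with bounds
// [0.0, 10.0],
//   ComputeVariableStatus(0.0, 0.0, 10.0)  -> VariableStatus::AT_LOWER_BOUND
//   ComputeVariableStatus(10.0, 0.0, 10.0) -> VariableStatus::AT_UPPER_BOUND
//   ComputeVariableStatus(5.0, 5.0, 5.0)   -> VariableStatus::FIXED_VALUE
// and a free variable with value 0.0 maps to VariableStatus::FREE.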
321 
322 // Returns the input with the smallest magnitude or zero if both are infinite.
323 Fractional MinInMagnitudeOrZeroIfInfinite(Fractional a, Fractional b) {
324  const Fractional value = std::abs(a) < std::abs(b) ? a : b;
325  return IsFinite(value) ? value : 0.0;
326 }
327 
328 Fractional MagnitudeOrZeroIfInfinite(Fractional value) {
329  return IsFinite(value) ? std::abs(value) : 0.0;
330 }
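// Examples (illustrative, not part of the original file):
//   MinInMagnitudeOrZeroIfInfinite(-3.0, 5.0) returns -3.0,
//   MinInMagnitudeOrZeroIfInfinite(-kInfinity, kInfinity) returns 0.0, and
//   MagnitudeOrZeroIfInfinite(-7.5) returns 7.5.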
331 
332 // Returns the maximum magnitude of the finite variable bounds of the given
333 // linear program.
334 Fractional ComputeMaxVariableBoundsMagnitude(const LinearProgram& lp) {
335  Fractional max_bounds_magnitude = 0.0;
336  const ColIndex num_cols = lp.num_variables();
337  for (ColIndex col(0); col < num_cols; ++col) {
338  max_bounds_magnitude = std::max(
339  max_bounds_magnitude,
340  std::max(MagnitudeOrZeroIfInfinite(lp.variable_lower_bounds()[col]),
341  MagnitudeOrZeroIfInfinite(lp.variable_upper_bounds()[col])));
342  }
343  return max_bounds_magnitude;
344 }
345 
346 } // namespace
347 
350  RETURN_VALUE_IF_NULL(lp, false);
351  column_deletion_helper_.Clear();
352  const ColIndex num_cols = lp->num_variables();
353  for (ColIndex col(0); col < num_cols; ++col) {
354  if (lp->GetSparseColumn(col).IsEmpty()) {
355  const Fractional lower_bound = lp->variable_lower_bounds()[col];
356  const Fractional upper_bound = lp->variable_upper_bounds()[col];
357  const Fractional objective_coefficient =
358  lp->objective_coefficients()[col];
359  Fractional value;
360  if (objective_coefficient == 0) {
361  // Any feasible value will do.
362  if (upper_bound != kInfinity) {
363  value = upper_bound;
364  } else {
365  if (lower_bound != -kInfinity) {
366  value = lower_bound;
367  } else {
368  value = Fractional(0.0);
369  }
370  }
371  } else {
372  value = objective_coefficient > 0 ? lower_bound : upper_bound;
373  if (!IsFinite(value)) {
374  VLOG(1) << "Problem INFEASIBLE_OR_UNBOUNDED, empty column " << col
375  << " has a minimization cost of " << objective_coefficient
376  << " and bounds"
377  << " [" << lower_bound << "," << upper_bound << "]";
379  return false;
380  }
382  value * lp->objective_coefficients()[col]);
383  }
384  column_deletion_helper_.MarkColumnForDeletionWithState(
385  col, value, ComputeVariableStatus(value, lower_bound, upper_bound));
386  }
387  }
388  lp->DeleteColumns(column_deletion_helper_.GetMarkedColumns());
389  return !column_deletion_helper_.IsEmpty();
390 }
391 
394  RETURN_IF_NULL(solution);
395  column_deletion_helper_.RestoreDeletedColumns(solution);
396 }
397 
398 // --------------------------------------------------------
399 // ProportionalColumnPreprocessor
400 // --------------------------------------------------------
401 
402 namespace {
403 
404 // Subtracts 'multiple' times the column col of the given linear program from
405 // the constraint bounds. That is, for a non-zero entry of coefficient c,
406 // c * multiple is subtracted from both the constraint upper and lower bound.
407 void SubtractColumnMultipleFromConstraintBound(ColIndex col,
408  Fractional multiple,
409  LinearProgram* lp) {
410  for (const SparseColumn::Entry e : lp->GetSparseColumn(col)) {
411  const RowIndex row = e.row();
412  const Fractional delta = multiple * e.coefficient();
415  }
416  // While not needed for correctness, this allows the presolved problem to
417  // have the same objective value as the original one.
419  lp->objective_coefficients()[col] * multiple);
420 }
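// Worked example (illustrative, not part of the original file): if column col
// has a single entry of coefficient 2.0 in a row with bounds [1.0, 5.0], then
// SubtractColumnMultipleFromConstraintBound(col, 3.0, lp) shifts these bounds
// by -2.0 * 3.0 to [-5.0, -1.0], and (per the comment above) accounts for
// objective_coefficients()[col] * 3.0 so that the presolved problem keeps the
// same objective value.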
421 
422 // Struct used to detect proportional columns with the same cost. For that, a
423 // vector of such structs will be sorted, and only the columns that end up
424 // together need to be compared.
425 struct ColumnWithRepresentativeAndScaledCost {
426  ColumnWithRepresentativeAndScaledCost(ColIndex _col, ColIndex _representative,
427  Fractional _scaled_cost)
428  : col(_col), representative(_representative), scaled_cost(_scaled_cost) {}
429  ColIndex col;
430  ColIndex representative;
432 
433  bool operator<(const ColumnWithRepresentativeAndScaledCost& other) const {
434  if (representative == other.representative) {
435  if (scaled_cost == other.scaled_cost) {
436  return col < other.col;
437  }
438  return scaled_cost < other.scaled_cost;
439  }
440  return representative < other.representative;
441  }
442 };
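// Example (illustrative, not part of the original file): sorting the entries
// {col=5, rep=2, cost=1.0}, {col=3, rep=2, cost=0.5}, {col=4, rep=1, cost=2.0}
// with the operator< above yields {4, 1, 2.0}, {3, 2, 0.5}, {5, 2, 1.0}:
// entries are grouped by representative, then by scaled cost, then by column
// index, so columns of the same class with (nearly) equal scaled cost end up
// adjacent and can be compared for merging.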
443 
444 } // namespace
445 
448  RETURN_VALUE_IF_NULL(lp, false);
450  lp->GetSparseMatrix(), parameters_.preprocessor_zero_tolerance());
451 
452  // Compute some statistics and make each class representative point to itself
453  // in the mapping. Also store the columns that are proportional to at least
454  // one other column in proportional_columns to iterate on them more efficiently.
455  //
456  // TODO(user): Change FindProportionalColumns for this?
457  int num_proportionality_classes = 0;
458  std::vector<ColIndex> proportional_columns;
459  for (ColIndex col(0); col < mapping.size(); ++col) {
460  const ColIndex representative = mapping[col];
461  if (representative != kInvalidCol) {
462  if (mapping[representative] == kInvalidCol) {
463  proportional_columns.push_back(representative);
464  ++num_proportionality_classes;
465  mapping[representative] = representative;
466  }
467  proportional_columns.push_back(col);
468  }
469  }
470  if (proportional_columns.empty()) return false;
471  VLOG(1) << "The problem contains " << proportional_columns.size()
472  << " columns which belong to " << num_proportionality_classes
473  << " proportionality classes.";
474 
475  // Note(user): using the first coefficient may not give the best precision.
476  const ColIndex num_cols = lp->num_variables();
477  column_factors_.assign(num_cols, 0.0);
478  for (const ColIndex col : proportional_columns) {
479  const SparseColumn& column = lp->GetSparseColumn(col);
480  column_factors_[col] = column.GetFirstCoefficient();
481  }
482 
483  // This is only meaningful for column representatives.
484  //
485  // The reduced cost of a column is 'cost - dual_values.column' and we know
486  // that for all proportional columns, 'dual_values.column /
487  // column_factors_[col]' is the same. Here, we bound this quantity which is
488  // related to the cost 'slope' of a proportional column:
489  // cost / column_factors_[col].
490  DenseRow slope_lower_bound(num_cols, -kInfinity);
491  DenseRow slope_upper_bound(num_cols, +kInfinity);
492  for (const ColIndex col : proportional_columns) {
493  const ColIndex representative = mapping[col];
494 
495  // We reason in terms of a minimization problem here.
496  const bool is_rc_positive_or_zero =
497  (lp->variable_upper_bounds()[col] == kInfinity);
498  const bool is_rc_negative_or_zero =
499  (lp->variable_lower_bounds()[col] == -kInfinity);
500  bool is_slope_upper_bounded = is_rc_positive_or_zero;
501  bool is_slope_lower_bounded = is_rc_negative_or_zero;
502  if (column_factors_[col] < 0.0) {
503  std::swap(is_slope_lower_bounded, is_slope_upper_bounded);
504  }
505  const Fractional slope =
506  lp->objective_coefficients()[col] /
507  column_factors_[col];
508  if (is_slope_lower_bounded) {
509  slope_lower_bound[representative] =
510  std::max(slope_lower_bound[representative], slope);
511  }
512  if (is_slope_upper_bounded) {
513  slope_upper_bound[representative] =
514  std::min(slope_upper_bound[representative], slope);
515  }
516  }
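// Worked example (illustrative, not part of the original file): in a
// minimization problem, take a column with cost 4.0, column factor 2.0
// (hence slope = 4.0 / 2.0 = 2.0), a finite lower bound and an infinite upper
// bound. At optimality its reduced cost 2.0 * (slope - quantity) must be
// >= 0.0, i.e. quantity <= 2.0, so slope_upper_bound of its class
// representative is tightened to at most 2.0.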
517 
518  // Deal with empty slope intervals.
519  for (const ColIndex col : proportional_columns) {
520  const ColIndex representative = mapping[col];
521 
522  // This is only needed for class representative columns.
523  if (representative == col) {
525  slope_lower_bound[representative],
526  slope_upper_bound[representative])) {
527  VLOG(1) << "Problem INFEASIBLE_OR_UNBOUNDED, no feasible dual values"
528  << " can satisfy the constraints of the proportional columns"
529  << " with representative " << representative << "."
530  << " the associated quantity must be in ["
531  << slope_lower_bound[representative] << ","
532  << slope_upper_bound[representative] << "].";
534  return false;
535  }
536  }
537  }
538 
539  // Now, fix the columns that can be fixed to one of their bounds.
540  for (const ColIndex col : proportional_columns) {
541  const ColIndex representative = mapping[col];
542  const Fractional slope =
543  lp->objective_coefficients()[col] /
544  column_factors_[col];
545 
546  // The scaled reduced cost is slope - quantity.
547  bool variable_can_be_fixed = false;
548  Fractional target_bound = 0.0;
549 
550  const Fractional lower_bound = lp->variable_lower_bounds()[col];
551  const Fractional upper_bound = lp->variable_upper_bounds()[col];
552  if (!IsSmallerWithinFeasibilityTolerance(slope_lower_bound[representative],
553  slope)) {
554  // The scaled reduced cost is < 0.
555  variable_can_be_fixed = true;
556  target_bound = (column_factors_[col] >= 0.0) ? upper_bound : lower_bound;
558  slope, slope_upper_bound[representative])) {
559  // The scaled reduced cost is > 0.
560  variable_can_be_fixed = true;
561  target_bound = (column_factors_[col] >= 0.0) ? lower_bound : upper_bound;
562  }
563 
564  if (variable_can_be_fixed) {
565  // Clear mapping[col] so this column will not be considered for the next
566  // stage.
567  mapping[col] = kInvalidCol;
568  if (!IsFinite(target_bound)) {
569  VLOG(1) << "Problem INFEASIBLE_OR_UNBOUNDED.";
571  return false;
572  } else {
573  SubtractColumnMultipleFromConstraintBound(col, target_bound, lp);
574  column_deletion_helper_.MarkColumnForDeletionWithState(
575  col, target_bound,
576  ComputeVariableStatus(target_bound, lower_bound, upper_bound));
577  }
578  }
579  }
580 
581  // Merge the variables with the same scaled cost.
582  std::vector<ColumnWithRepresentativeAndScaledCost> sorted_columns;
583  for (const ColIndex col : proportional_columns) {
584  const ColIndex representative = mapping[col];
585 
586  // This test is needed because we already removed some columns.
587  if (mapping[col] != kInvalidCol) {
588  sorted_columns.push_back(ColumnWithRepresentativeAndScaledCost(
590  lp->objective_coefficients()[col] / column_factors_[col]));
591  }
592  }
593  std::sort(sorted_columns.begin(), sorted_columns.end());
594 
595  // All this will be needed during postsolve.
596  merged_columns_.assign(num_cols, kInvalidCol);
597  lower_bounds_.assign(num_cols, -kInfinity);
598  upper_bounds_.assign(num_cols, kInfinity);
599  new_lower_bounds_.assign(num_cols, -kInfinity);
600  new_upper_bounds_.assign(num_cols, kInfinity);
601 
602  for (int i = 0; i < sorted_columns.size();) {
603  const ColIndex target_col = sorted_columns[i].col;
604  const ColIndex target_representative = sorted_columns[i].representative;
605  const Fractional target_scaled_cost = sorted_columns[i].scaled_cost;
606 
607  // Save the initial bounds before modifying them.
608  lower_bounds_[target_col] = lp->variable_lower_bounds()[target_col];
609  upper_bounds_[target_col] = lp->variable_upper_bounds()[target_col];
610 
611  int num_merged = 0;
612  for (++i; i < sorted_columns.size(); ++i) {
613  if (sorted_columns[i].representative != target_representative) break;
614  if (std::abs(sorted_columns[i].scaled_cost - target_scaled_cost) >=
615  parameters_.preprocessor_zero_tolerance()) {
616  break;
617  }
618  ++num_merged;
619  const ColIndex col = sorted_columns[i].col;
620  const Fractional lower_bound = lp->variable_lower_bounds()[col];
621  const Fractional upper_bound = lp->variable_upper_bounds()[col];
622  lower_bounds_[col] = lower_bound;
623  upper_bounds_[col] = upper_bound;
624  merged_columns_[col] = target_col;
625 
626  // This is a bit counterintuitive, but when a column is divided by x,
627  // the corresponding bounds have to be multiplied by x.
628  const Fractional bound_factor =
629  column_factors_[col] / column_factors_[target_col];
630 
631  // We need to shift the variable so that a basic solution of the new
632  // problem can easily be converted to a basic solution of the original
633  // problem.
634 
635  // A feasible value for the variable must be chosen, and the variable must
636  // be shifted by this value. This is done to make sure that it will be
637  // possible to recreate a basic solution of the original problem from a
638  // basic solution of the pre-solved problem during post-solve.
639  const Fractional target_value =
640  MinInMagnitudeOrZeroIfInfinite(lower_bound, upper_bound);
641  Fractional lower_diff = (lower_bound - target_value) * bound_factor;
642  Fractional upper_diff = (upper_bound - target_value) * bound_factor;
643  if (bound_factor < 0.0) {
644  std::swap(lower_diff, upper_diff);
645  }
646  lp->SetVariableBounds(
647  target_col, lp->variable_lower_bounds()[target_col] + lower_diff,
648  lp->variable_upper_bounds()[target_col] + upper_diff);
649  SubtractColumnMultipleFromConstraintBound(col, target_value, lp);
650  column_deletion_helper_.MarkColumnForDeletionWithState(
651  col, target_value,
652  ComputeVariableStatus(target_value, lower_bound, upper_bound));
653  }
654 
655  // If at least one column was merged, the target column must be shifted like
656  // the other columns in the same equivalence class for the same reason (see
657  // above).
658  if (num_merged > 0) {
659  merged_columns_[target_col] = target_col;
660  const Fractional target_value = MinInMagnitudeOrZeroIfInfinite(
661  lower_bounds_[target_col], upper_bounds_[target_col]);
662  lp->SetVariableBounds(
663  target_col, lp->variable_lower_bounds()[target_col] - target_value,
664  lp->variable_upper_bounds()[target_col] - target_value);
665  SubtractColumnMultipleFromConstraintBound(target_col, target_value, lp);
666  new_lower_bounds_[target_col] = lp->variable_lower_bounds()[target_col];
667  new_upper_bounds_[target_col] = lp->variable_upper_bounds()[target_col];
668  }
669  }
670 
671  lp->DeleteColumns(column_deletion_helper_.GetMarkedColumns());
672  return !column_deletion_helper_.IsEmpty();
673 }
674 
676  ProblemSolution* solution) const {
678  RETURN_IF_NULL(solution);
679  column_deletion_helper_.RestoreDeletedColumns(solution);
680 
681  // The rest of this function unmerges the columns so that the solution is
682  // primal-feasible.
683  const ColIndex num_cols = merged_columns_.size();
684  DenseBooleanRow is_representative_basic(num_cols, false);
685  DenseBooleanRow is_distance_to_upper_bound(num_cols, false);
686  DenseRow distance_to_bound(num_cols, 0.0);
687  DenseRow wanted_value(num_cols, 0.0);
688 
689  // The first pass is a loop over the representatives to compute the current
690  // distance to the new bounds.
691  for (ColIndex col(0); col < num_cols; ++col) {
692  if (merged_columns_[col] == col) {
693  const Fractional value = solution->primal_values[col];
694  const Fractional distance_to_upper_bound = new_upper_bounds_[col] - value;
695  const Fractional distance_to_lower_bound = value - new_lower_bounds_[col];
696  if (distance_to_upper_bound < distance_to_lower_bound) {
697  distance_to_bound[col] = distance_to_upper_bound;
698  is_distance_to_upper_bound[col] = true;
699  } else {
700  distance_to_bound[col] = distance_to_lower_bound;
701  is_distance_to_upper_bound[col] = false;
702  }
703  is_representative_basic[col] =
705 
706  // Restore the representative value to a feasible value of the initial
707  // variable. Now all the merged variables are at a feasible value.
708  wanted_value[col] = value;
709  solution->primal_values[col] = MinInMagnitudeOrZeroIfInfinite(
710  lower_bounds_[col], upper_bounds_[col]);
711  solution->variable_statuses[col] = ComputeVariableStatus(
712  solution->primal_values[col], lower_bounds_[col], upper_bounds_[col]);
713  }
714  }
715 
716  // Second pass to correct the values.
717  for (ColIndex col(0); col < num_cols; ++col) {
718  const ColIndex representative = merged_columns_[col];
719  if (representative != kInvalidCol) {
720  if (IsFinite(distance_to_bound[representative])) {
721  // If the distance is finite, then each variable is set to its
722  // corresponding bound (the one from which the distance is computed) and
723  // is then changed by as much as possible until the distance is zero.
724  const Fractional bound_factor =
725  column_factors_[col] / column_factors_[representative];
726  const Fractional scaled_distance =
727  distance_to_bound[representative] / std::abs(bound_factor);
728  const Fractional width = upper_bounds_[col] - lower_bounds_[col];
729  const bool to_upper_bound =
730  (bound_factor > 0.0) == is_distance_to_upper_bound[representative];
731  if (width <= scaled_distance) {
732  solution->primal_values[col] =
733  to_upper_bound ? lower_bounds_[col] : upper_bounds_[col];
734  solution->variable_statuses[col] =
735  ComputeVariableStatus(solution->primal_values[col],
736  lower_bounds_[col], upper_bounds_[col]);
737  distance_to_bound[representative] -= width * std::abs(bound_factor);
738  } else {
739  solution->primal_values[col] =
740  to_upper_bound ? upper_bounds_[col] - scaled_distance
741  : lower_bounds_[col] + scaled_distance;
742  solution->variable_statuses[col] =
743  is_representative_basic[representative]
745  : ComputeVariableStatus(solution->primal_values[col],
746  lower_bounds_[col],
747  upper_bounds_[col]);
748  distance_to_bound[representative] = 0.0;
749  is_representative_basic[representative] = false;
750  }
751  } else {
752  // If the distance is not finite, then only one variable needs to be
753  // changed from its current feasible value in order to have a
754  // primal-feasible solution.
755  const Fractional error = wanted_value[representative];
756  if (error == 0.0) {
757  if (is_representative_basic[representative]) {
759  is_representative_basic[representative] = false;
760  }
761  } else {
762  const Fractional bound_factor =
763  column_factors_[col] / column_factors_[representative];
764  const bool use_this_variable =
765  (error * bound_factor > 0.0) ? (upper_bounds_[col] == kInfinity)
766  : (lower_bounds_[col] == -kInfinity);
767  if (use_this_variable) {
768  wanted_value[representative] = 0.0;
769  solution->primal_values[col] += error / bound_factor;
770  if (is_representative_basic[representative]) {
772  is_representative_basic[representative] = false;
773  } else {
774  // This should not happen on an OPTIMAL or FEASIBLE solution.
775  DCHECK(solution->status != ProblemStatus::OPTIMAL &&
778  }
779  }
780  }
781  }
782  }
783  }
784 }
785 
786 // --------------------------------------------------------
787 // ProportionalRowPreprocessor
788 // --------------------------------------------------------
789 
792  RETURN_VALUE_IF_NULL(lp, false);
793  const RowIndex num_rows = lp->num_constraints();
794  const SparseMatrix& transpose = lp->GetTransposeSparseMatrix();
795 
796  // Use the first coefficient of each row to compute the proportionality
797  // factor. Note that the sign is important.
798  //
799  // Note(user): using the first coefficient may not give the best precision.
800  row_factors_.assign(num_rows, 0.0);
801  for (RowIndex row(0); row < num_rows; ++row) {
802  const SparseColumn& row_transpose = transpose.column(RowToColIndex(row));
803  if (!row_transpose.IsEmpty()) {
804  row_factors_[row] = row_transpose.GetFirstCoefficient();
805  }
806  }
807 
808  // The new row bounds (only meaningful for the proportional rows).
809  DenseColumn lower_bounds(num_rows, -kInfinity);
810  DenseColumn upper_bounds(num_rows, +kInfinity);
811 
812  // Where the new bounds are coming from. Only for the constraints that stay
813  // in the lp and are modified, kInvalidRow otherwise.
814  upper_bound_sources_.assign(num_rows, kInvalidRow);
815  lower_bound_sources_.assign(num_rows, kInvalidRow);
816 
817  // Initialization.
818  // We need the first representative of each proportional row class to point to
819  // itself for the loop below. TODO(user): Already return such a mapping from
820  // FindProportionalColumns()?
822  transpose, parameters_.preprocessor_zero_tolerance());
823  DenseBooleanColumn is_a_representative(num_rows, false);
824  int num_proportional_rows = 0;
825  for (RowIndex row(0); row < num_rows; ++row) {
826  const ColIndex representative_row_as_col = mapping[RowToColIndex(row)];
827  if (representative_row_as_col != kInvalidCol) {
828  mapping[representative_row_as_col] = representative_row_as_col;
829  is_a_representative[ColToRowIndex(representative_row_as_col)] = true;
830  ++num_proportional_rows;
831  }
832  }
833 
834  // Compute the bound of each representative as implied by the rows
835  // which are proportional to it. Also keep the source row of each bound.
836  for (RowIndex row(0); row < num_rows; ++row) {
837  const ColIndex row_as_col = RowToColIndex(row);
838  if (mapping[row_as_col] != kInvalidCol) {
839  // For now, delete all the rows that are proportional to another one.
840  // Note that we will unmark the final representative of this class later.
841  row_deletion_helper_.MarkRowForDeletion(row);
842  const RowIndex representative_row = ColToRowIndex(mapping[row_as_col]);
843 
844  const Fractional factor =
845  row_factors_[representative_row] / row_factors_[row];
846  Fractional implied_lb = factor * lp->constraint_lower_bounds()[row];
847  Fractional implied_ub = factor * lp->constraint_upper_bounds()[row];
848  if (factor < 0.0) {
849  std::swap(implied_lb, implied_ub);
850  }
851 
852  // TODO(user): if the bounds are equal, use the largest row in magnitude?
853  if (implied_lb >= lower_bounds[representative_row]) {
854  lower_bounds[representative_row] = implied_lb;
855  lower_bound_sources_[representative_row] = row;
856  }
857  if (implied_ub <= upper_bounds[representative_row]) {
858  upper_bounds[representative_row] = implied_ub;
859  upper_bound_sources_[representative_row] = row;
860  }
861  }
862  }
863 
864  // For maximum precision, and also to simplify the postsolve, we choose
865  // a representative for each class of proportional rows that has at least
866  // one of the two tightest bounds.
867  for (RowIndex row(0); row < num_rows; ++row) {
868  if (!is_a_representative[row]) continue;
869  const RowIndex lower_source = lower_bound_sources_[row];
870  const RowIndex upper_source = upper_bound_sources_[row];
871  lower_bound_sources_[row] = kInvalidRow;
872  upper_bound_sources_[row] = kInvalidRow;
873  DCHECK_NE(lower_source, kInvalidRow);
874  DCHECK_NE(upper_source, kInvalidRow);
875  if (lower_source == upper_source) {
876  // In this case, a simple change of representative is enough.
877  // The constraint bounds of the representative will not change.
878  DCHECK_NE(lower_source, kInvalidRow);
879  row_deletion_helper_.UnmarkRow(lower_source);
880  } else {
881  // Report ProblemStatus::PRIMAL_INFEASIBLE if the new lower bound is not
882  // lower than the new upper bound modulo the default tolerance.
884  upper_bounds[row])) {
886  return false;
887  }
888 
889  // Special case for fixed rows.
890  if (lp->constraint_lower_bounds()[lower_source] ==
891  lp->constraint_upper_bounds()[lower_source]) {
892  row_deletion_helper_.UnmarkRow(lower_source);
893  continue;
894  }
895  if (lp->constraint_lower_bounds()[upper_source] ==
896  lp->constraint_upper_bounds()[upper_source]) {
897  row_deletion_helper_.UnmarkRow(upper_source);
898  continue;
899  }
900 
901  // This is the only case where a more complex postsolve is needed.
902  // To maximize precision, the class representative is changed to either
903  // upper_source or lower_source depending on which row has the largest
904  // proportionality factor.
905  RowIndex new_representative = lower_source;
906  RowIndex other = upper_source;
907  if (std::abs(row_factors_[new_representative]) <
908  std::abs(row_factors_[other])) {
909  std::swap(new_representative, other);
910  }
911 
912  // Initialize the new bounds with the implied ones.
913  const Fractional factor =
914  row_factors_[new_representative] / row_factors_[other];
915  Fractional new_lb = factor * lp->constraint_lower_bounds()[other];
916  Fractional new_ub = factor * lp->constraint_upper_bounds()[other];
917  if (factor < 0.0) {
918  std::swap(new_lb, new_ub);
919  }
920 
921  lower_bound_sources_[new_representative] = new_representative;
922  upper_bound_sources_[new_representative] = new_representative;
923 
924  if (new_lb > lp->constraint_lower_bounds()[new_representative]) {
925  lower_bound_sources_[new_representative] = other;
926  } else {
927  new_lb = lp->constraint_lower_bounds()[new_representative];
928  }
929  if (new_ub < lp->constraint_upper_bounds()[new_representative]) {
930  upper_bound_sources_[new_representative] = other;
931  } else {
932  new_ub = lp->constraint_upper_bounds()[new_representative];
933  }
934  const RowIndex new_lower_source =
935  lower_bound_sources_[new_representative];
936  if (new_lower_source == upper_bound_sources_[new_representative]) {
937  row_deletion_helper_.UnmarkRow(new_lower_source);
938  lower_bound_sources_[new_representative] = kInvalidRow;
939  upper_bound_sources_[new_representative] = kInvalidRow;
940  continue;
941  }
942 
943  // Take care of small numerical imprecision by making sure that lb <= ub.
944  // Note that if the imprecision was greater than the tolerance, the code
945  // at the beginning of this block would have reported
946  // ProblemStatus::PRIMAL_INFEASIBLE.
948  if (new_lb > new_ub) {
949  if (lower_bound_sources_[new_representative] == new_representative) {
950  new_ub = lp->constraint_lower_bounds()[new_representative];
951  } else {
952  new_lb = lp->constraint_upper_bounds()[new_representative];
953  }
954  }
955  row_deletion_helper_.UnmarkRow(new_representative);
956  lp->SetConstraintBounds(new_representative, new_lb, new_ub);
957  }
958  }
959 
960  lp_is_maximization_problem_ = lp->IsMaximizationProblem();
961  lp->DeleteRows(row_deletion_helper_.GetMarkedRows());
962  return !row_deletion_helper_.IsEmpty();
963 }
964 
966  ProblemSolution* solution) const {
968  RETURN_IF_NULL(solution);
969  row_deletion_helper_.RestoreDeletedRows(solution);
970 
971  // Make sure that all non-zero dual values on the proportional rows are
972  // assigned to the correct row with the correct sign and that the statuses
973  // are correct.
974  const RowIndex num_rows = solution->dual_values.size();
975  for (RowIndex row(0); row < num_rows; ++row) {
976  const RowIndex lower_source = lower_bound_sources_[row];
977  const RowIndex upper_source = upper_bound_sources_[row];
978  if (lower_source == kInvalidRow && upper_source == kInvalidRow) continue;
979  DCHECK_NE(lower_source, upper_source);
980  DCHECK(lower_source == row || upper_source == row);
981 
982  // If the representative is ConstraintStatus::BASIC, then all rows in this
983  // class will be ConstraintStatus::BASIC and there is nothing to do.
985  if (status == ConstraintStatus::BASIC) continue;
986 
987  // If the row is FIXED it will behave as a row
988  // ConstraintStatus::AT_UPPER_BOUND or
989  // ConstraintStatus::AT_LOWER_BOUND depending on the corresponding dual
990  // variable sign.
992  const Fractional corrected_dual_value = lp_is_maximization_problem_
993  ? -solution->dual_values[row]
994  : solution->dual_values[row];
995  if (corrected_dual_value != 0.0) {
996  status = corrected_dual_value > 0.0 ? ConstraintStatus::AT_LOWER_BOUND
998  }
999  }
1000 
1001  // If one of the two conditions below is true, set the row status to
1002  // ConstraintStatus::BASIC.
1003  // Note that the source which is not row can't be FIXED (see presolve).
1004  if (lower_source != row && status == ConstraintStatus::AT_LOWER_BOUND) {
1005  DCHECK_EQ(0.0, solution->dual_values[lower_source]);
1006  const Fractional factor = row_factors_[row] / row_factors_[lower_source];
1007  solution->dual_values[lower_source] = factor * solution->dual_values[row];
1008  solution->dual_values[row] = 0.0;
1010  solution->constraint_statuses[lower_source] =
1011  factor > 0.0 ? ConstraintStatus::AT_LOWER_BOUND
1013  }
1014  if (upper_source != row && status == ConstraintStatus::AT_UPPER_BOUND) {
1015  DCHECK_EQ(0.0, solution->dual_values[upper_source]);
1016  const Fractional factor = row_factors_[row] / row_factors_[upper_source];
1017  solution->dual_values[upper_source] = factor * solution->dual_values[row];
1018  solution->dual_values[row] = 0.0;
1020  solution->constraint_statuses[upper_source] =
1021  factor > 0.0 ? ConstraintStatus::AT_UPPER_BOUND
1023  }
1024 
1025  // If the row status is still ConstraintStatus::FIXED_VALUE, we need to
1026  // relax its status.
1028  solution->constraint_statuses[row] =
1029  lower_source != row ? ConstraintStatus::AT_UPPER_BOUND
1031  }
1032  }
1033 }
1034 
1035 // --------------------------------------------------------
1036 // FixedVariablePreprocessor
1037 // --------------------------------------------------------
1038 
1041  RETURN_VALUE_IF_NULL(lp, false);
1042  const ColIndex num_cols = lp->num_variables();
1043  for (ColIndex col(0); col < num_cols; ++col) {
1044  const Fractional lower_bound = lp->variable_lower_bounds()[col];
1045  const Fractional upper_bound = lp->variable_upper_bounds()[col];
1046  if (lower_bound == upper_bound) {
1047  const Fractional fixed_value = lower_bound;
1048  DCHECK(IsFinite(fixed_value));
1049 
1050  // We need to change the constraint bounds.
1051  SubtractColumnMultipleFromConstraintBound(col, fixed_value, lp);
1052  column_deletion_helper_.MarkColumnForDeletionWithState(
1053  col, fixed_value, VariableStatus::FIXED_VALUE);
1054  }
1055  }
1056 
1057  lp->DeleteColumns(column_deletion_helper_.GetMarkedColumns());
1058  return !column_deletion_helper_.IsEmpty();
1059 }
1060 
1062  ProblemSolution* solution) const {
1064  RETURN_IF_NULL(solution);
1065  column_deletion_helper_.RestoreDeletedColumns(solution);
1066 }
1067 
1068 // --------------------------------------------------------
1069 // ForcingAndImpliedFreeConstraintPreprocessor
1070 // --------------------------------------------------------
1071 
1074  RETURN_VALUE_IF_NULL(lp, false);
1075  const RowIndex num_rows = lp->num_constraints();
1076 
1077  // Compute the implied constraint bounds from the variable bounds.
1078  DenseColumn implied_lower_bounds(num_rows, 0);
1079  DenseColumn implied_upper_bounds(num_rows, 0);
1080  const ColIndex num_cols = lp->num_variables();
1081  StrictITIVector<RowIndex, int> row_degree(num_rows, 0);
1082  for (ColIndex col(0); col < num_cols; ++col) {
1083  const Fractional lower = lp->variable_lower_bounds()[col];
1084  const Fractional upper = lp->variable_upper_bounds()[col];
1085  for (const SparseColumn::Entry e : lp->GetSparseColumn(col)) {
1086  const RowIndex row = e.row();
1087  const Fractional coeff = e.coefficient();
1088  if (coeff > 0.0) {
1089  implied_lower_bounds[row] += lower * coeff;
1090  implied_upper_bounds[row] += upper * coeff;
1091  } else {
1092  implied_lower_bounds[row] += upper * coeff;
1093  implied_upper_bounds[row] += lower * coeff;
1094  }
1095  ++row_degree[row];
1096  }
1097  }
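// Worked example (illustrative, not part of the original file): consider a
// row 2*x - 3*y with x in [0, 1] and y in [0, 2]. The positive coefficient
// contributes [0, 2] and the negative one [-6, 0], so the implied activity
// bounds are [-6, 2]. If the constraint lower bound is exactly 2, the row can
// only be satisfied with the activity at its implied maximum, so it is
// forcing; if the lower bound exceeds 2 by more than the tolerance, the
// problem is reported infeasible by the checks below.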
1098 
1099  // Note that the ScalingPreprocessor is currently executed last, so here the
1100  // problem has not been scaled yet.
1101  int num_implied_free_constraints = 0;
1102  int num_forcing_constraints = 0;
1103  is_forcing_up_.assign(num_rows, false);
1104  DenseBooleanColumn is_forcing_down(num_rows, false);
1105  for (RowIndex row(0); row < num_rows; ++row) {
1106  if (row_degree[row] == 0) continue;
1107  Fractional lower = lp->constraint_lower_bounds()[row];
1108  Fractional upper = lp->constraint_upper_bounds()[row];
1109 
1110  // Check for infeasibility.
1112  implied_upper_bounds[row]) ||
1113  !IsSmallerWithinFeasibilityTolerance(implied_lower_bounds[row],
1114  upper)) {
1115  VLOG(1) << "implied bound " << implied_lower_bounds[row] << " "
1116  << implied_upper_bounds[row];
1117  VLOG(1) << "constraint bound " << lower << " " << upper;
1119  return false;
1120  }
1121 
1122  // Check if the constraint is forcing. That is, all the variables that
1123  // appear in it must be at one of their bounds.
1124  if (IsSmallerWithinPreprocessorZeroTolerance(implied_upper_bounds[row],
1125  lower)) {
1126  is_forcing_down[row] = true;
1127  ++num_forcing_constraints;
1128  continue;
1129  }
1131  implied_lower_bounds[row])) {
1132  is_forcing_up_[row] = true;
1133  ++num_forcing_constraints;
1134  continue;
1135  }
1136 
1137  // We relax the constraint bounds only if the constraint is implied to be
1138  // free. Such constraints will later be deleted by the
1139  // FreeConstraintPreprocessor.
1140  //
1141  // Note that we could relax only one of the two bounds, but the impact this
1142  // would have on the revised simplex algorithm is unclear at this point.
1144  implied_lower_bounds[row]) &&
1145  IsSmallerWithinPreprocessorZeroTolerance(implied_upper_bounds[row],
1146  upper)) {
1148  ++num_implied_free_constraints;
1149  }
1150  }
1151 
1152  if (num_implied_free_constraints > 0) {
1153  VLOG(1) << num_implied_free_constraints << " implied free constraints.";
1154  }
1155 
1156  if (num_forcing_constraints > 0) {
1157  VLOG(1) << num_forcing_constraints << " forcing constraints.";
1158  lp_is_maximization_problem_ = lp->IsMaximizationProblem();
1159  deleted_columns_.PopulateFromZero(num_rows, num_cols);
1160  costs_.resize(num_cols, 0.0);
1161  for (ColIndex col(0); col < num_cols; ++col) {
1162  const SparseColumn& column = lp->GetSparseColumn(col);
1163  const Fractional lower = lp->variable_lower_bounds()[col];
1164  const Fractional upper = lp->variable_upper_bounds()[col];
1165  bool is_forced = false;
1166  Fractional target_bound = 0.0;
1167  for (const SparseColumn::Entry e : column) {
1168  if (is_forcing_down[e.row()]) {
1169  const Fractional candidate = e.coefficient() < 0.0 ? lower : upper;
1170  if (is_forced && candidate != target_bound) {
1171  // The bounds are really close, so we fix to the bound with
1172  // the lowest magnitude. As of 2019/11/19, this is "better" than
1173  // fixing to the mid-point, because at postsolve, we always put
1174  // non-basic variables to their exact bounds (so, with mid-point
1175  // there would be a difference of epsilon/2 between the inner
1176  // solution and the postsolved one, which might cause issues).
1177  if (IsSmallerWithinPreprocessorZeroTolerance(upper, lower)) {
1178  target_bound = std::abs(lower) < std::abs(upper) ? lower : upper;
1179  continue;
1180  }
1181  VLOG(1) << "A variable is forced in both directions! bounds: ["
1182  << std::fixed << std::setprecision(10) << lower << ", "
1183  << upper << "]. coeff:" << e.coefficient();
1185  return false;
1186  }
1187  target_bound = candidate;
1188  is_forced = true;
1189  }
1190  if (is_forcing_up_[e.row()]) {
1191  const Fractional candidate = e.coefficient() < 0.0 ? upper : lower;
1192  if (is_forced && candidate != target_bound) {
1193  // The bounds are really close, so we fix to the bound with
1194  // the lowest magnitude.
1195  if (IsSmallerWithinPreprocessorZeroTolerance(upper, lower)) {
1196  target_bound = std::abs(lower) < std::abs(upper) ? lower : upper;
1197  continue;
1198  }
1199  VLOG(1) << "A variable is forced in both directions! bounds: ["
1200  << std::fixed << std::setprecision(10) << lower << ", "
1201  << upper << "]. coeff:" << e.coefficient();
1203  return false;
1204  }
1205  target_bound = candidate;
1206  is_forced = true;
1207  }
1208  }
1209  if (is_forced) {
1210  // Fix the variable, update the constraint bounds and save this column
1211  // and its cost for the postsolve.
1212  SubtractColumnMultipleFromConstraintBound(col, target_bound, lp);
1213  column_deletion_helper_.MarkColumnForDeletionWithState(
1214  col, target_bound,
1215  ComputeVariableStatus(target_bound, lower, upper));
1216  deleted_columns_.mutable_column(col)->PopulateFromSparseVector(column);
1217  costs_[col] = lp->objective_coefficients()[col];
1218  }
1219  }
1220  for (RowIndex row(0); row < num_rows; ++row) {
1221  // In theory, an M exists such that for any magnitude >= M, we will be at
1222  // an optimal solution. However, because of numerical errors, if the value
1223  // is too large, it causes problems when verifying the solution. So we
1224  // select the smallest such M (at least a reasonably small one) during
1225  // postsolve. This is why we need to store the columns that were
1226  // fixed.
1227  if (is_forcing_down[row] || is_forcing_up_[row]) {
1228  row_deletion_helper_.MarkRowForDeletion(row);
1229  }
1230  }
1231  }
1232 
1233  lp->DeleteColumns(column_deletion_helper_.GetMarkedColumns());
1234  lp->DeleteRows(row_deletion_helper_.GetMarkedRows());
1235  return !column_deletion_helper_.IsEmpty();
1236 }
1237 
1239  ProblemSolution* solution) const {
1241  RETURN_IF_NULL(solution);
1242  column_deletion_helper_.RestoreDeletedColumns(solution);
1243  row_deletion_helper_.RestoreDeletedRows(solution);
1244 
1245  // Compute for each deleted column the last deleted row in which it appears.
1246  const ColIndex num_cols = deleted_columns_.num_cols();
1247  ColToRowMapping last_deleted_row(num_cols, kInvalidRow);
1248  for (ColIndex col(0); col < num_cols; ++col) {
1249  if (!column_deletion_helper_.IsColumnMarked(col)) continue;
1250  for (const SparseColumn::Entry e : deleted_columns_.column(col)) {
1251  const RowIndex row = e.row();
1252  if (row_deletion_helper_.IsRowMarked(row)) {
1253  last_deleted_row[col] = row;
1254  }
1255  }
1256  }
1257 
1258  // For each deleted row (in order), compute a bound on the dual values so
1259  // that all the deleted columns for which this row is the last deleted row are
1260  // dual-feasible. Note that for the other columns, it will always be possible
1261  // to make them dual-feasible with a later row.
1262  // There are two possible outcomes:
1263  // - The dual value stays 0.0, and nothing changes.
1264  // - The bounds enforce a non-zero dual value, and one column will have a
1265  // reduced cost of 0.0. This column becomes VariableStatus::BASIC, and the
1266  // constraint status is changed to ConstraintStatus::AT_LOWER_BOUND,
1267  // ConstraintStatus::AT_UPPER_BOUND or ConstraintStatus::FIXED_VALUE.
1268  SparseMatrix transpose;
1269  transpose.PopulateFromTranspose(deleted_columns_);
1270  const RowIndex num_rows = solution->dual_values.size();
1271  for (RowIndex row(0); row < num_rows; ++row) {
1272  if (row_deletion_helper_.IsRowMarked(row)) {
1273  Fractional new_dual_value = 0.0;
1274  ColIndex new_basic_column = kInvalidCol;
1275  for (const SparseColumn::Entry e : transpose.column(RowToColIndex(row))) {
1276  const ColIndex col = RowToColIndex(e.row());
1277  if (last_deleted_row[col] != row) continue;
1278  const Fractional scalar_product =
1279  ScalarProduct(solution->dual_values, deleted_columns_.column(col));
1280  const Fractional reduced_cost = costs_[col] - scalar_product;
1281  const Fractional bound = reduced_cost / e.coefficient();
1282  if (is_forcing_up_[row] == !lp_is_maximization_problem_) {
1283  if (bound < new_dual_value) {
1284  new_dual_value = bound;
1285  new_basic_column = col;
1286  }
1287  } else {
1288  if (bound > new_dual_value) {
1289  new_dual_value = bound;
1290  new_basic_column = col;
1291  }
1292  }
1293  }
1294  if (new_basic_column != kInvalidCol) {
1295  solution->dual_values[row] = new_dual_value;
1296  solution->variable_statuses[new_basic_column] = VariableStatus::BASIC;
1297  solution->constraint_statuses[row] =
1298  is_forcing_up_[row] ? ConstraintStatus::AT_UPPER_BOUND
1300  }
1301  }
1302  }
1303 }
1304 
1305 // --------------------------------------------------------
1306 // ImpliedFreePreprocessor
1307 // --------------------------------------------------------
1308 
1309 namespace {
1310 struct ColWithDegree {
1311  ColIndex col;
1312  EntryIndex num_entries;
1313  ColWithDegree(ColIndex c, EntryIndex n) : col(c), num_entries(n) {}
1314  bool operator<(const ColWithDegree& other) const {
1315  if (num_entries == other.num_entries) {
1316  return col < other.col;
1317  }
1318  return num_entries < other.num_entries;
1319  }
1320 };
1321 } // namespace
1322 
1325  RETURN_VALUE_IF_NULL(lp, false);
1326  const RowIndex num_rows = lp->num_constraints();
1327  const ColIndex num_cols = lp->num_variables();
1328 
1329  // For each constraint with n entries and each of its variables, we want the
1330  // bounds implied by the (n - 1) other variables and the constraint. We
1331  // use two handy utility classes that allow us to do that efficiently while
1332  // dealing properly with infinite bounds.
1333  const int size = num_rows.value();
1334  // TODO(user): Replace SumWithNegativeInfiniteAndOneMissing and
1335  // SumWithPositiveInfiniteAndOneMissing with IntervalSumWithOneMissing.
1337  size);
1339  size);
1340 
1341  // Initialize the sums by adding all the bounds of the variables.
1342  for (ColIndex col(0); col < num_cols; ++col) {
1343  const Fractional lower_bound = lp->variable_lower_bounds()[col];
1344  const Fractional upper_bound = lp->variable_upper_bounds()[col];
1345  for (const SparseColumn::Entry e : lp->GetSparseColumn(col)) {
1346  Fractional entry_lb = e.coefficient() * lower_bound;
1347  Fractional entry_ub = e.coefficient() * upper_bound;
1348  if (e.coefficient() < 0.0) std::swap(entry_lb, entry_ub);
1349  lb_sums[e.row()].Add(entry_lb);
1350  ub_sums[e.row()].Add(entry_ub);
1351  }
1352  }
1353 
1354  // The inequality
1355  // constraint_lb <= sum(entries) <= constraint_ub
1356  // can be rewritten as:
1357  // sum(entries) + (-activity) = 0,
1358  // where (-activity) has bounds [-constraint_ub, -constraint_lb].
1359  // We use this latter convention to simplify our code.
1360  for (RowIndex row(0); row < num_rows; ++row) {
1361  lb_sums[row].Add(-lp->constraint_upper_bounds()[row]);
1362  ub_sums[row].Add(-lp->constraint_lower_bounds()[row]);
1363  }
1364 
1365  // Once a variable is freed, none of the rows in which it appears can be
1366  // used to make another variable free.
1367  DenseBooleanColumn used_rows(num_rows, false);
1368  postsolve_status_of_free_variables_.assign(num_cols, VariableStatus::FREE);
1369  variable_offsets_.assign(num_cols, 0.0);
1370 
1371  // It is better to process columns with a small degree first:
1372  // - Degree-two columns make it possible to remove a row from the problem.
1373  // - This way there is a better chance of creating more free columns.
1374  // - It is better to have low-degree free columns since a free column will
1375  // always end up in the simplex basis (unless there are more free columns
1376  // than rows in the problem).
1377  //
1378  // TODO(user): Only process degree-two columns so that in subsequent passes
1379  // more degree-two columns can be made free, and only process higher-degree
1380  // columns when no other reduction can be applied?
1381  //
1382  // TODO(user): Be smarter about the order that maximizes the number of free
1383  // columns. For instance, if we have 3 doubleton columns that use the rows
1384  // (1,2), (2,3) and (3,4), then it is better not to make (2,3) free so that
1385  // the other two can be made free.
1386  std::vector<ColWithDegree> col_by_degree;
1387  for (ColIndex col(0); col < num_cols; ++col) {
1388  col_by_degree.push_back(
1389  ColWithDegree(col, lp->GetSparseColumn(col).num_entries()));
1390  }
1391  std::sort(col_by_degree.begin(), col_by_degree.end());
1392 
1393  // Now loop over the columns in order and make all implied-free columns free.
1394  int num_already_free_variables = 0;
1395  int num_implied_free_variables = 0;
1396  int num_fixed_variables = 0;
1397  for (ColWithDegree col_with_degree : col_by_degree) {
1398  const ColIndex col = col_with_degree.col;
1399 
1400  // If the variable is already free or fixed, we do nothing.
1401  const Fractional lower_bound = lp->variable_lower_bounds()[col];
1402  const Fractional upper_bound = lp->variable_upper_bounds()[col];
1403  if (!IsFinite(lower_bound) && !IsFinite(upper_bound)) {
1404  ++num_already_free_variables;
1405  continue;
1406  }
1407  if (lower_bound == upper_bound) continue;
1408 
1409  // Detect if the variable is implied free.
1410  Fractional overall_implied_lb = -kInfinity;
1411  Fractional overall_implied_ub = kInfinity;
1412  for (const SparseColumn::Entry e : lp->GetSparseColumn(col)) {
1413  // If the row contains another implied free variable, then the bounds
1414  // implied by it will just be [-kInfinity, kInfinity] so we can skip it.
1415  if (used_rows[e.row()]) continue;
1416 
1417  // This is the contribution of this column to the sum above.
1418  const Fractional coeff = e.coefficient();
1419  Fractional entry_lb = coeff * lower_bound;
1420  Fractional entry_ub = coeff * upper_bound;
1421  if (coeff < 0.0) std::swap(entry_lb, entry_ub);
1422 
1423  // If X is the variable with index col and Y the sum of all the other
1424  // variables and of (-activity), then coeff * X + Y = 0. Since Y's bounds
1425  // are [lb_sum without X, ub_sum without X], it is easy to derive the
1426  // implied bounds on X.
1427  Fractional implied_lb = -ub_sums[e.row()].SumWithout(entry_ub) / coeff;
1428  Fractional implied_ub = -lb_sums[e.row()].SumWithout(entry_lb) / coeff;
1429  if (coeff < 0.0) std::swap(implied_lb, implied_ub);
1430  overall_implied_lb = std::max(overall_implied_lb, implied_lb);
1431  overall_implied_ub = std::min(overall_implied_ub, implied_ub);
1432  }
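// Worked example (illustrative, not part of the original file): if
// coeff = 2.0 and the sum Y of the other terms (including -activity) lies in
// [-10.0, 4.0], then 2.0 * X = -Y implies X in
// [-4.0 / 2.0, 10.0 / 2.0] = [-2.0, 5.0]. Intersecting such intervals over
// all rows of the column yields [overall_implied_lb, overall_implied_ub].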
1433 
1434  // Detect infeasible cases.
1435  if (!IsSmallerWithinFeasibilityTolerance(overall_implied_lb, upper_bound) ||
1436  !IsSmallerWithinFeasibilityTolerance(lower_bound, overall_implied_ub) ||
1437  !IsSmallerWithinFeasibilityTolerance(overall_implied_lb,
1438  overall_implied_ub)) {
1440  return false;
1441  }
1442 
1443  // Detect fixed variable cases (there are two kinds).
1444  // Note that currently we don't do anything here except counting them.
1446  overall_implied_lb) ||
1447  IsSmallerWithinPreprocessorZeroTolerance(overall_implied_ub,
1448  lower_bound)) {
1449  // This case is already dealt with by the
1450  // ForcingAndImpliedFreeConstraintPreprocessor since it means that (at
1451  // least) one of the row is forcing.
1452  ++num_fixed_variables;
1453  continue;
1454  } else if (IsSmallerWithinPreprocessorZeroTolerance(overall_implied_ub,
1455  overall_implied_lb)) {
1456  // TODO(user): As of July 2013, with our preprocessors this case is never
1457  // triggered on the Netlib. Note however that if it appears it can have a
1458  // big impact since by fixing the variable, the two involved constraints
1459  // are forcing and can be removed too (with all the variables they touch).
1460  // The postsolve step is quite involved though.
1461  ++num_fixed_variables;
1462  continue;
1463  }
1464 
1465  // Is the variable implied free? Note that for an infinite lower_bound or
1466  // upper_bound the respective inequality is always true.
1468  overall_implied_lb) &&
1469  IsSmallerWithinPreprocessorZeroTolerance(overall_implied_ub,
1470  upper_bound)) {
1471  ++num_implied_free_variables;
1473  for (const SparseColumn::Entry e : lp->GetSparseColumn(col)) {
1474  used_rows[e.row()] = true;
1475  }
1476 
1477  // This is a tricky part. We're freeing this variable, which means that
1478  // after solve, the modified variable will have status either
1479  // VariableStatus::FREE or VariableStatus::BASIC. In the former case
1480  // (VariableStatus::FREE, value = 0.0), we need to "fix" the
1481  // status (technically, our variable isn't free!) to either
1482  // VariableStatus::AT_LOWER_BOUND or VariableStatus::AT_UPPER_BOUND
1483  // (note that we skipped fixed variables), and "fix" the value to that
1484  // bound's value as well. We make the decision and the precomputation
1485  // here: we simply offset the variable by one of its bounds, and store
1486  // which bound that was. Note that if the modified variable turns out to
1487  // be VariableStatus::BASIC, we'll simply un-offset its value too;
1488  // and let the status be VariableStatus::BASIC.
1489  //
1490  // TODO(user): This trick is already used in the DualizerPreprocessor,
1491  // maybe we should just have a preprocessor that shifts all the variable
1492  // bounds so that at least one of them is at 0.0; would that improve the
1493  // precision and speed of the simplex? One advantage is that we can compute the
1494  // new constraint bounds with better precision using AccurateSum.
1495  DCHECK_NE(lower_bound, upper_bound);
1496  const Fractional offset =
1497  MinInMagnitudeOrZeroIfInfinite(lower_bound, upper_bound);
1498  if (offset != 0.0) {
1499  variable_offsets_[col] = offset;
1500  SubtractColumnMultipleFromConstraintBound(col, offset, lp);
1501  }
1502  postsolve_status_of_free_variables_[col] =
1503  ComputeVariableStatus(offset, lower_bound, upper_bound);
1504  }
1505  }
1506  VLOG(1) << num_already_free_variables << " free variables in the problem.";
1507  VLOG(1) << num_implied_free_variables << " implied free columns.";
1508  VLOG(1) << num_fixed_variables << " variables can be fixed.";
1509  return num_implied_free_variables > 0;
1510 }
1511 
1512  void ImpliedFreePreprocessor::RecoverSolution(
1513  ProblemSolution* solution) const {
1514  RETURN_IF_NULL(solution);
1515  const ColIndex num_cols = solution->variable_statuses.size();
1516  for (ColIndex col(0); col < num_cols; ++col) {
1517  // Skip variables that the preprocessor didn't change.
1518  if (postsolve_status_of_free_variables_[col] == VariableStatus::FREE) {
1519  DCHECK_EQ(0.0, variable_offsets_[col]);
1520  continue;
1521  }
1522  if (solution->variable_statuses[col] == VariableStatus::FREE) {
1523  solution->variable_statuses[col] =
1524  postsolve_status_of_free_variables_[col];
1525  } else {
1527  }
1528  solution->primal_values[col] += variable_offsets_[col];
1529  }
1530 }
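// Illustrative sketch (added by the editor, not part of the original source),
// with hypothetical numbers for the offset trick above: suppose a variable
// with bounds [2, 7] was freed with offset 2 (its bound of smallest
// magnitude). If the solve returns it as VariableStatus::FREE with value 0.0,
// the stored status (here AT_LOWER_BOUND) is restored and the value becomes
// 0.0 + 2 = 2. If it comes back BASIC with value 3.5, it stays BASIC and its
// value is simply un-offset to 3.5 + 2 = 5.5.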
1531 
1532 // --------------------------------------------------------
1533 // DoubletonFreeColumnPreprocessor
1534 // --------------------------------------------------------
1535 
1536  bool DoubletonFreeColumnPreprocessor::Run(LinearProgram* lp) {
1538  RETURN_VALUE_IF_NULL(lp, false);
1539  // We will modify the matrix transpose and then push the change to the linear
1540  // program by calling lp->UseTransposeMatrixAsReference(). Note
1541  // that original_matrix will not change during this preprocessor run.
1542  const SparseMatrix& original_matrix = lp->GetSparseMatrix();
1543  SparseMatrix* transpose = lp->GetMutableTransposeSparseMatrix();
1544 
1545  const ColIndex num_cols(lp->num_variables());
1546  for (ColIndex doubleton_col(0); doubleton_col < num_cols; ++doubleton_col) {
1547  // Only consider doubleton free columns.
1548  if (original_matrix.column(doubleton_col).num_entries() != 2) continue;
1549  if (lp->variable_lower_bounds()[doubleton_col] != -kInfinity) continue;
1550  if (lp->variable_upper_bounds()[doubleton_col] != kInfinity) continue;
1551 
1552  // Collect the two column items. Note that we skip a column involving a
1553  // deleted row since it is no longer a doubleton then.
1554  RestoreInfo r;
1555  r.col = doubleton_col;
1556  r.objective_coefficient = lp->objective_coefficients()[r.col];
1557  int index = 0;
1558  for (const SparseColumn::Entry e : original_matrix.column(r.col)) {
1559  if (row_deletion_helper_.IsRowMarked(e.row())) break;
1560  r.row[index] = e.row();
1561  r.coeff[index] = e.coefficient();
1562  DCHECK_NE(0.0, e.coefficient());
1563  ++index;
1564  }
1565  if (index != NUM_ROWS) continue;
1566 
1567  // Since the column didn't touch any previously deleted row, we are sure
1568  // that the coefficients were left untouched.
1569  DCHECK_EQ(r.coeff[DELETED], transpose->column(RowToColIndex(r.row[DELETED]))
1570  .LookUpCoefficient(ColToRowIndex(r.col)));
1571  DCHECK_EQ(r.coeff[MODIFIED],
1572  transpose->column(RowToColIndex(r.row[MODIFIED]))
1573  .LookUpCoefficient(ColToRowIndex(r.col)));
1574 
1575  // We prefer deleting the row with the larger coefficient magnitude because
1576  // we will divide by this magnitude. TODO(user): Impact?
1577  if (std::abs(r.coeff[DELETED]) < std::abs(r.coeff[MODIFIED])) {
1578  std::swap(r.coeff[DELETED], r.coeff[MODIFIED]);
1579  std::swap(r.row[DELETED], r.row[MODIFIED]);
1580  }
1581 
1582  // Save the deleted row for postsolve. Note that we remove it from the
1583  // transpose at the same time. This last operation is not strictly needed,
1584  // but it is faster to do it this way (both here and later when we take
1585  // the transpose of the final transpose matrix).
1586  r.deleted_row_as_column.Swap(
1587  transpose->mutable_column(RowToColIndex(r.row[DELETED])));
1588 
1589  // Move the bound of the deleted constraint to the initially free variable.
1590  {
1591  Fractional new_variable_lb =
1592  lp->constraint_lower_bounds()[r.row[DELETED]];
1593  Fractional new_variable_ub =
1594  lp->constraint_upper_bounds()[r.row[DELETED]];
1595  new_variable_lb /= r.coeff[DELETED];
1596  new_variable_ub /= r.coeff[DELETED];
1597  if (r.coeff[DELETED] < 0.0) std::swap(new_variable_lb, new_variable_ub);
1598  lp->SetVariableBounds(r.col, new_variable_lb, new_variable_ub);
1599  }
1600 
1601  // Add a multiple of the deleted row to the modified row except on
1602  // column r.col where the coefficient will be left unchanged.
1603  r.deleted_row_as_column.AddMultipleToSparseVectorAndIgnoreCommonIndex(
1604  -r.coeff[MODIFIED] / r.coeff[DELETED], ColToRowIndex(r.col),
1605  parameters_.drop_tolerance(),
1606  transpose->mutable_column(RowToColIndex(r.row[MODIFIED])));
1607 
1608  // We also need to correct the objective value of the variables involved in
1609  // the deleted row.
1610  if (r.objective_coefficient != 0.0) {
1611  for (const SparseColumn::Entry e : r.deleted_row_as_column) {
1612  const ColIndex col = RowToColIndex(e.row());
1613  if (col == r.col) continue;
1614  const Fractional new_objective =
1615  lp->objective_coefficients()[col] -
1616  e.coefficient() * r.objective_coefficient / r.coeff[DELETED];
1617 
1618  // This detects the case where the objective should actually be zero, but
1619  // because of numerical error in the formula above, we end up with a tiny
1620  // nonzero objective instead. The logic is the same as in
1621  // AddMultipleToSparseVectorAndIgnoreCommonIndex().
1622  if (std::abs(new_objective) > parameters_.drop_tolerance()) {
1623  lp->SetObjectiveCoefficient(col, new_objective);
1624  } else {
1625  lp->SetObjectiveCoefficient(col, 0.0);
1626  }
1627  }
1628  }
1629  row_deletion_helper_.MarkRowForDeletion(r.row[DELETED]);
1630  restore_stack_.push_back(r);
1631  }
1632 
1633  if (!row_deletion_helper_.IsEmpty()) {
1634  // The order is important.
1635  lp->UseTransposeMatrixAsReference();
1636  lp->DeleteRows(row_deletion_helper_.GetMarkedRows());
1637  return true;
1638  }
1639  return false;
1640 }
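// Illustrative sketch (added by the editor, not part of the original source),
// with hypothetical numbers: let x be free with deleted row r1: 2*x + y in
// [2, 8] and modified row r2: 3*x + z in [0, 6]. The bounds of x become
// [2, 8] / 2 = [1, 4], and r2 becomes 3*x + z - (3/2)*y in [0, 6] (the
// coefficient of x itself is left untouched). The new x plays the role of
// (2*x_old + y) / 2, which is why the postsolve below subtracts
// (coeff / coeff[DELETED]) * value over the other variables of the deleted
// row to recover the original x.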
1641 
1642  void DoubletonFreeColumnPreprocessor::RecoverSolution(
1643  ProblemSolution* solution) const {
1645  row_deletion_helper_.RestoreDeletedRows(solution);
1646  for (const RestoreInfo& r : Reverse(restore_stack_)) {
1647  // Correct the constraint status.
1648  switch (solution->variable_statuses[r.col]) {
1649  case VariableStatus::FIXED_VALUE:
1650  solution->constraint_statuses[r.row[DELETED]] =
1651  ConstraintStatus::FIXED_VALUE;
1652  break;
1653  case VariableStatus::AT_UPPER_BOUND:
1654  solution->constraint_statuses[r.row[DELETED]] =
1655  r.coeff[DELETED] > 0.0 ? ConstraintStatus::AT_UPPER_BOUND
1656  : ConstraintStatus::AT_LOWER_BOUND;
1657  break;
1658  case VariableStatus::AT_LOWER_BOUND:
1659  solution->constraint_statuses[r.row[DELETED]] =
1660  r.coeff[DELETED] > 0.0 ? ConstraintStatus::AT_LOWER_BOUND
1661  : ConstraintStatus::AT_UPPER_BOUND;
1662  break;
1663  case VariableStatus::FREE:
1664  solution->constraint_statuses[r.row[DELETED]] = ConstraintStatus::FREE;
1665  break;
1666  case VariableStatus::BASIC:
1667  // The default is good here:
1668  DCHECK_EQ(solution->constraint_statuses[r.row[DELETED]],
1669  ConstraintStatus::BASIC);
1670  break;
1671  }
1672 
1673  // Correct the primal variable value.
1674  {
1675  Fractional new_variable_value = solution->primal_values[r.col];
1676  for (const SparseColumn::Entry e : r.deleted_row_as_column) {
1677  const ColIndex col = RowToColIndex(e.row());
1678  if (col == r.col) continue;
1679  new_variable_value -= (e.coefficient() / r.coeff[DELETED]) *
1680  solution->primal_values[RowToColIndex(e.row())];
1681  }
1682  solution->primal_values[r.col] = new_variable_value;
1683  }
1684 
1685  // In all cases, we will make the variable r.col VariableStatus::BASIC, so
1686  // we need to adjust the dual value of the deleted row so that the variable
1687  // reduced cost is zero. Note that there is nothing to do if the variable
1688  // was already basic.
1689  if (solution->variable_statuses[r.col] != VariableStatus::BASIC) {
1690  solution->variable_statuses[r.col] = VariableStatus::BASIC;
1691  Fractional current_reduced_cost =
1692  r.objective_coefficient -
1693  r.coeff[MODIFIED] * solution->dual_values[r.row[MODIFIED]];
1694  // We want current_reduced_cost - dual * coeff = 0, so:
1695  solution->dual_values[r.row[DELETED]] =
1696  current_reduced_cost / r.coeff[DELETED];
1697  } else {
1698  DCHECK_EQ(solution->dual_values[r.row[DELETED]], 0.0);
1699  }
1700  }
1701 }
1702 
1703 // --------------------------------------------------------
1704 // UnconstrainedVariablePreprocessor
1705 // --------------------------------------------------------
1706 
1707 namespace {
1708 
1709  // Does the constraint block the variable from going to infinity in the given
1710 // direction? direction is either positive or negative and row is the index of
1711 // the constraint.
1712 bool IsConstraintBlockingVariable(const LinearProgram& lp, Fractional direction,
1713  RowIndex row) {
1714  return direction > 0.0 ? lp.constraint_upper_bounds()[row] != kInfinity
1716 }
1717 
1718 } // namespace
1719 
1720  void UnconstrainedVariablePreprocessor::RemoveZeroCostUnconstrainedVariable(
1721  ColIndex col, Fractional target_bound, LinearProgram* lp) {
1722  DCHECK_EQ(0.0, lp->objective_coefficients()[col]);
1723  if (deleted_rows_as_column_.IsEmpty()) {
1724  deleted_columns_.PopulateFromZero(lp->num_constraints(),
1725  lp->num_variables());
1726  deleted_rows_as_column_.PopulateFromZero(
1727  ColToRowIndex(lp->num_variables()),
1728  RowToColIndex(lp->num_constraints()));
1729  rhs_.resize(lp->num_constraints(), 0.0);
1730  activity_sign_correction_.resize(lp->num_constraints(), 1.0);
1731  is_unbounded_.resize(lp->num_variables(), false);
1732  }
1733  const bool is_unbounded_up = (target_bound == kInfinity);
1734  const SparseColumn& column = lp->GetSparseColumn(col);
1735  for (const SparseColumn::Entry e : column) {
1736  const RowIndex row = e.row();
1737  if (!row_deletion_helper_.IsRowMarked(row)) {
1738  row_deletion_helper_.MarkRowForDeletion(row);
1739  const ColIndex row_as_col = RowToColIndex(row);
1740  deleted_rows_as_column_.mutable_column(row_as_col)
1741  ->PopulateFromSparseVector(
1742  lp->GetTransposeSparseMatrix().column(row_as_col));
1743  }
1744  const bool is_constraint_upper_bound_relevant =
1745  e.coefficient() > 0.0 ? !is_unbounded_up : is_unbounded_up;
1746  activity_sign_correction_[row] =
1747  is_constraint_upper_bound_relevant ? 1.0 : -1.0;
1748  rhs_[row] = is_constraint_upper_bound_relevant
1749  ? lp->constraint_upper_bounds()[row]
1750  : lp->constraint_lower_bounds()[row];
1751 
1752  // TODO(user): Here, we may render the row free, so subsequent columns
1753  // processed by the columns loop in Run() have more chance to be removed.
1754  // However, we need to be more careful during the postsolve() if we do that.
1755  }
1756  is_unbounded_[col] = true;
1757  Fractional initial_feasible_value = MinInMagnitudeOrZeroIfInfinite(
1758  lp->variable_lower_bounds()[col], lp->variable_upper_bounds()[col]);
1759  deleted_columns_.mutable_column(col)->PopulateFromSparseVector(column);
1760  column_deletion_helper_.MarkColumnForDeletionWithState(
1761  col, initial_feasible_value,
1762  ComputeVariableStatus(initial_feasible_value,
1763  lp->variable_lower_bounds()[col],
1764  lp->variable_upper_bounds()[col]));
1765 }
1766 
1767  bool UnconstrainedVariablePreprocessor::Run(LinearProgram* lp) {
1769  RETURN_VALUE_IF_NULL(lp, false);
1770 
1771  // To simplify the problem if something is almost zero, we use the low
1772  // tolerance (1e-9 by default) to be defensive. But to detect an infeasibility
1773  // we want to be sure (especially since the problem is not scaled in the
1774  // presolver) so we use a higher tolerance.
1775  //
1776  // TODO(user): Expose it as a parameter. We could rename both to
1777  // preprocessor_low_tolerance and preprocessor_high_tolerance.
1778  const Fractional low_tolerance = parameters_.preprocessor_zero_tolerance();
1779  const Fractional high_tolerance = 1e-4;
1780 
1781  // We start by the dual variable bounds from the constraints.
1782  const RowIndex num_rows = lp->num_constraints();
1783  dual_lb_.assign(num_rows, -kInfinity);
1784  dual_ub_.assign(num_rows, kInfinity);
1785  for (RowIndex row(0); row < num_rows; ++row) {
1786  if (lp->constraint_lower_bounds()[row] == -kInfinity) {
1787  dual_ub_[row] = 0.0;
1788  }
1789  if (lp->constraint_upper_bounds()[row] == kInfinity) {
1790  dual_lb_[row] = 0.0;
1791  }
1792  }
1793 
1794  const ColIndex num_cols = lp->num_variables();
1795  may_have_participated_lb_.assign(num_cols, false);
1796  may_have_participated_ub_.assign(num_cols, false);
1797 
1798  // We maintain a queue of columns to process.
1799  std::deque<ColIndex> columns_to_process;
1800  DenseBooleanRow in_columns_to_process(num_cols, true);
1801  std::vector<RowIndex> changed_rows;
1802  for (ColIndex col(0); col < num_cols; ++col) {
1803  columns_to_process.push_back(col);
1804  }
1805 
1806  // Arbitrary limit to avoid corner cases with long loops.
1807  // TODO(user): expose this as a parameter? IMO it isn't really needed as we
1808  // shouldn't reach this limit except in corner cases.
1809  const int limit = 5 * num_cols.value();
1810  for (int count = 0; !columns_to_process.empty() && count < limit; ++count) {
1811  const ColIndex col = columns_to_process.front();
1812  columns_to_process.pop_front();
1813  in_columns_to_process[col] = false;
1814  if (column_deletion_helper_.IsColumnMarked(col)) continue;
1815 
1816  const SparseColumn& column = lp->GetSparseColumn(col);
1817  const Fractional col_cost =
1818  lp->GetObjectiveCoefficientForMinimizationVersion(col);
1819  const Fractional col_lb = lp->variable_lower_bounds()[col];
1820  const Fractional col_ub = lp->variable_upper_bounds()[col];
1821 
1822  // Compute the bounds on the reduced costs of this column.
1823  SumWithNegativeInfiniteAndOneMissing rc_lb;
1824  SumWithPositiveInfiniteAndOneMissing rc_ub;
1825  rc_lb.Add(col_cost);
1826  rc_ub.Add(col_cost);
1827  for (const SparseColumn::Entry e : column) {
1828  if (row_deletion_helper_.IsRowMarked(e.row())) continue;
1829  const Fractional coeff = e.coefficient();
1830  if (coeff > 0.0) {
1831  rc_lb.Add(-coeff * dual_ub_[e.row()]);
1832  rc_ub.Add(-coeff * dual_lb_[e.row()]);
1833  } else {
1834  rc_lb.Add(-coeff * dual_lb_[e.row()]);
1835  rc_ub.Add(-coeff * dual_ub_[e.row()]);
1836  }
1837  }
1838 
1839  // If the reduced cost domain does not contain zero (modulo the tolerance), we
1840  // can move the variable to its corresponding bound. Note that we need to be
1841  // careful that this variable didn't participate in creating the used
1842  // reduced cost bound in the first place.
1843  bool can_be_removed = false;
1844  Fractional target_bound;
1845  bool rc_is_away_from_zero;
1846  if (rc_ub.Sum() <= low_tolerance) {
1847  can_be_removed = true;
1848  target_bound = col_ub;
1849  rc_is_away_from_zero = rc_ub.Sum() <= -high_tolerance;
1850  can_be_removed = !may_have_participated_ub_[col];
1851  }
1852  if (rc_lb.Sum() >= -low_tolerance) {
1853  // The second condition is here for the case we can choose one of the two
1854  // directions.
1855  if (!can_be_removed || !IsFinite(target_bound)) {
1856  can_be_removed = true;
1857  target_bound = col_lb;
1858  rc_is_away_from_zero = rc_lb.Sum() >= high_tolerance;
1859  can_be_removed = !may_have_participated_lb_[col];
1860  }
1861  }
1862 
1863  if (can_be_removed) {
1864  if (IsFinite(target_bound)) {
1865  // Note that in MIP context, this assumes that the bounds of an integer
1866  // variable are integer.
1867  column_deletion_helper_.MarkColumnForDeletionWithState(
1868  col, target_bound,
1869  ComputeVariableStatus(target_bound, col_lb, col_ub));
1870  continue;
1871  }
1872 
1873  // If the target bound is infinite and the reduced cost bound is non-zero,
1874  // then the problem is ProblemStatus::INFEASIBLE_OR_UNBOUNDED.
1875  if (rc_is_away_from_zero) {
1876  VLOG(1) << "Problem INFEASIBLE_OR_UNBOUNDED, variable " << col
1877  << " can move to " << target_bound
1878  << " and its reduced cost is in [" << rc_lb.Sum() << ", "
1879  << rc_ub.Sum() << "]";
1880  status_ = ProblemStatus::INFEASIBLE_OR_UNBOUNDED;
1881  return false;
1882  } else {
1883  // We can remove this column and all its constraints! We just need to
1884  // choose proper variable values during the call to RecoverSolution()
1885  // that make all the constraints satisfiable. Unfortunately, this is not
1886  // so easy to do in the general case, so we only deal with a simpler
1887  // case when the cost of the variable is zero, and the constraints do
1888  // not block it in one direction.
1889  //
1890  // TODO(user): deal with the more generic case.
1891  if (col_cost != 0.0) continue;
1892  bool skip = false;
1893  for (const SparseColumn::Entry e : column) {
1894  // Note that it is important to check the rows that are already
1895  // deleted here, otherwise the post-solve will not work.
1896  if (IsConstraintBlockingVariable(*lp, e.coefficient(), e.row())) {
1897  skip = true;
1898  break;
1899  }
1900  }
1901  if (skip) continue;
1902 
1903  // TODO(user): this also works if the variable is integer, but we must
1904  // choose an integer value during the post-solve. Implement this.
1905  if (in_mip_context_) continue;
1906  RemoveZeroCostUnconstrainedVariable(col, target_bound, lp);
1907  continue;
1908  }
1909  }
1910 
1911  // The rest of the code will update the dual bounds. There is no need to do
1912  // it if the column was removed or if it is not unconstrained in some
1913  // direction.
1914  DCHECK(!can_be_removed);
1915  if (col_lb != -kInfinity && col_ub != kInfinity) continue;
1916 
1917  // For MIP, we only exploit the constraints. TODO(user): It should probably
1918  // work with only small modifications, investigate.
1919  if (in_mip_context_) continue;
1920 
1921  changed_rows.clear();
1922  for (const SparseColumn::Entry e : column) {
1923  if (row_deletion_helper_.IsRowMarked(e.row())) continue;
1924  const Fractional c = e.coefficient();
1925  const RowIndex row = e.row();
1926  if (col_ub == kInfinity) {
1927  if (c > 0.0) {
1928  const Fractional candidate = rc_ub.SumWithout(-c * dual_lb_[row]) / c;
1929  if (candidate < dual_ub_[row]) {
1930  dual_ub_[row] = candidate;
1931  may_have_participated_lb_[col] = true;
1932  changed_rows.push_back(row);
1933  }
1934  } else {
1935  const Fractional candidate = rc_ub.SumWithout(-c * dual_ub_[row]) / c;
1936  if (candidate > dual_lb_[row]) {
1937  dual_lb_[row] = candidate;
1938  may_have_participated_lb_[col] = true;
1939  changed_rows.push_back(row);
1940  }
1941  }
1942  }
1943  if (col_lb == -kInfinity) {
1944  if (c > 0.0) {
1945  const Fractional candidate = rc_lb.SumWithout(-c * dual_ub_[row]) / c;
1946  if (candidate > dual_lb_[row]) {
1947  dual_lb_[row] = candidate;
1948  may_have_participated_ub_[col] = true;
1949  changed_rows.push_back(row);
1950  }
1951  } else {
1952  const Fractional candidate = rc_lb.SumWithout(-c * dual_lb_[row]) / c;
1953  if (candidate < dual_ub_[row]) {
1954  dual_ub_[row] = candidate;
1955  may_have_participated_ub_[col] = true;
1956  changed_rows.push_back(row);
1957  }
1958  }
1959  }
1960  }
1961 
1962  if (!changed_rows.empty()) {
1963  const SparseMatrix& transpose = lp->GetTransposeSparseMatrix();
1964  for (const RowIndex row : changed_rows) {
1965  for (const SparseColumn::Entry entry :
1966  transpose.column(RowToColIndex(row))) {
1967  const ColIndex col = RowToColIndex(entry.row());
1968  if (!in_columns_to_process[col]) {
1969  columns_to_process.push_back(col);
1970  in_columns_to_process[col] = true;
1971  }
1972  }
1973  }
1974  }
1975  }
1976 
1977  // Change the rhs to reflect the fixed variables. Note that it is important to do
1978  // that after all the calls to RemoveZeroCostUnconstrainedVariable() because
1979  // RemoveZeroCostUnconstrainedVariable() needs to store the rhs before this
1980  // modification!
1981  const ColIndex end = column_deletion_helper_.GetMarkedColumns().size();
1982  for (ColIndex col(0); col < end; ++col) {
1983  if (column_deletion_helper_.IsColumnMarked(col)) {
1984  const Fractional target_bound =
1985  column_deletion_helper_.GetStoredValue()[col];
1986  SubtractColumnMultipleFromConstraintBound(col, target_bound, lp);
1987  }
1988  }
1989 
1990  lp->DeleteColumns(column_deletion_helper_.GetMarkedColumns());
1991  lp->DeleteRows(row_deletion_helper_.GetMarkedRows());
1992  return !column_deletion_helper_.IsEmpty() || !row_deletion_helper_.IsEmpty();
1993 }
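// Illustrative sketch (added by the editor, not part of the original source),
// with hypothetical numbers for the reduced-cost argument used in Run() above:
// take x >= 0 with cost 3 appearing only in one row with coefficient 2, where
// the dual value of that row is known to lie in [-1, 0]. The reduced cost of x
// is 3 - 2 * dual, i.e. it lies in [3, 5], which excludes zero, so x can be
// fixed at its lower bound 0 and removed from the problem.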
1994 
1995  void UnconstrainedVariablePreprocessor::RecoverSolution(
1996  ProblemSolution* solution) const {
1998  RETURN_IF_NULL(solution);
1999  column_deletion_helper_.RestoreDeletedColumns(solution);
2000  row_deletion_helper_.RestoreDeletedRows(solution);
2001 
2002  // Compute the last deleted column index for each deleted row.
2003  const RowIndex num_rows = solution->dual_values.size();
2004  RowToColMapping last_deleted_column(num_rows, kInvalidCol);
2005  for (RowIndex row(0); row < num_rows; ++row) {
2006  if (row_deletion_helper_.IsRowMarked(row)) {
2007  for (const SparseColumn::Entry e :
2008  deleted_rows_as_column_.column(RowToColIndex(row))) {
2009  const ColIndex col = RowToColIndex(e.row());
2010  if (is_unbounded_[col]) {
2011  last_deleted_column[row] = col;
2012  }
2013  }
2014  }
2015  }
2016 
2017  // Note that this will be empty if there were no deleted rows.
2018  const ColIndex num_cols = is_unbounded_.size();
2019  for (ColIndex col(0); col < num_cols; ++col) {
2020  if (!is_unbounded_[col]) continue;
2021  Fractional primal_value_shift = 0.0;
2022  RowIndex row_at_bound = kInvalidRow;
2023  for (const SparseColumn::Entry e : deleted_columns_.column(col)) {
2024  const RowIndex row = e.row();
2025  // The second condition is for VariableStatus::FREE rows.
2026  // TODO(user): In the presence of free rows, we must move them to 0.
2027  // Note that currently VariableStatus::FREE rows should be removed before
2028  // this is called.
2029  DCHECK(IsFinite(rhs_[row]));
2030  if (last_deleted_column[row] != col || !IsFinite(rhs_[row])) continue;
2031  const SparseColumn& row_as_column =
2032  deleted_rows_as_column_.column(RowToColIndex(row));
2033  const Fractional activity =
2034  rhs_[row] - ScalarProduct(solution->primal_values, row_as_column);
2035 
2036  // activity and sign correction must have the same sign or be zero. If
2037  // not, we find the first unbounded variable and change it accordingly.
2038  // Note that by construction, the variable value will move towards its
2039  // unbounded direction.
2040  if (activity * activity_sign_correction_[row] < 0.0) {
2041  const Fractional bound = activity / e.coefficient();
2042  if (std::abs(bound) > std::abs(primal_value_shift)) {
2043  primal_value_shift = bound;
2044  row_at_bound = row;
2045  }
2046  }
2047  }
2048  solution->primal_values[col] += primal_value_shift;
2049  if (row_at_bound != kInvalidRow) {
2051  solution->constraint_statuses[row_at_bound] =
2052  activity_sign_correction_[row_at_bound] == 1.0
2053  ? ConstraintStatus::AT_UPPER_BOUND
2054  : ConstraintStatus::AT_LOWER_BOUND;
2055  }
2056  }
2057 }
2058 
2059 // --------------------------------------------------------
2060 // FreeConstraintPreprocessor
2061 // --------------------------------------------------------
2062 
2063  bool FreeConstraintPreprocessor::Run(LinearProgram* lp) {
2065  RETURN_VALUE_IF_NULL(lp, false);
2066  const RowIndex num_rows = lp->num_constraints();
2067  for (RowIndex row(0); row < num_rows; ++row) {
2068  const Fractional lower_bound = lp->constraint_lower_bounds()[row];
2069  const Fractional upper_bound = lp->constraint_upper_bounds()[row];
2070  if (lower_bound == -kInfinity && upper_bound == kInfinity) {
2071  row_deletion_helper_.MarkRowForDeletion(row);
2072  }
2073  }
2074  lp->DeleteRows(row_deletion_helper_.GetMarkedRows());
2075  return !row_deletion_helper_.IsEmpty();
2076 }
2077 
2078  void FreeConstraintPreprocessor::RecoverSolution(
2079  ProblemSolution* solution) const {
2081  RETURN_IF_NULL(solution);
2082  row_deletion_helper_.RestoreDeletedRows(solution);
2083 }
2084 
2085 // --------------------------------------------------------
2086 // EmptyConstraintPreprocessor
2087 // --------------------------------------------------------
2088 
2089  bool EmptyConstraintPreprocessor::Run(LinearProgram* lp) {
2091  RETURN_VALUE_IF_NULL(lp, false);
2092  const RowIndex num_rows(lp->num_constraints());
2093  const ColIndex num_cols(lp->num_variables());
2094 
2095  // Compute degree.
2096  StrictITIVector<RowIndex, int> degree(num_rows, 0);
2097  for (ColIndex col(0); col < num_cols; ++col) {
2098  for (const SparseColumn::Entry e : lp->GetSparseColumn(col)) {
2099  ++degree[e.row()];
2100  }
2101  }
2102 
2103  // Delete degree 0 rows.
2104  for (RowIndex row(0); row < num_rows; ++row) {
2105  if (degree[row] == 0) {
2106  // We need to check that 0.0 is allowed by the constraint bounds,
2107  // otherwise, the problem is ProblemStatus::PRIMAL_INFEASIBLE.
2108  if (!IsSmallerWithinFeasibilityTolerance(
2109  lp->constraint_lower_bounds()[row], 0) ||
2110  !IsSmallerWithinFeasibilityTolerance(
2111  0, lp->constraint_upper_bounds()[row])) {
2112  VLOG(1) << "Problem PRIMAL_INFEASIBLE, constraint " << row
2113  << " is empty and its range ["
2114  << lp->constraint_lower_bounds()[row] << ","
2115  << lp->constraint_upper_bounds()[row] << "] doesn't contain 0.";
2116  status_ = ProblemStatus::PRIMAL_INFEASIBLE;
2117  return false;
2118  }
2119  row_deletion_helper_.MarkRowForDeletion(row);
2120  }
2121  }
2122  lp->DeleteRows(row_deletion_helper_.GetMarkedRows());
2123  return !row_deletion_helper_.IsEmpty();
2124 }
2125 
2126  void EmptyConstraintPreprocessor::RecoverSolution(
2127  ProblemSolution* solution) const {
2129  RETURN_IF_NULL(solution);
2130  row_deletion_helper_.RestoreDeletedRows(solution);
2131 }
2132 
2133 // --------------------------------------------------------
2134 // SingletonPreprocessor
2135 // --------------------------------------------------------
2136 
2137  SingletonUndo::SingletonUndo(UndoType type, const LinearProgram& lp,
2138  MatrixEntry e, ConstraintStatus status)
2139  : type_(type),
2140  is_maximization_(lp.IsMaximizationProblem()),
2141  e_(e),
2142  cost_(lp.objective_coefficients()[e.col]),
2143  variable_lower_bound_(lp.variable_lower_bounds()[e.col]),
2144  variable_upper_bound_(lp.variable_upper_bounds()[e.col]),
2145  constraint_lower_bound_(lp.constraint_lower_bounds()[e.row]),
2146  constraint_upper_bound_(lp.constraint_upper_bounds()[e.row]),
2147  constraint_status_(status) {}
2148 
2149 void SingletonUndo::Undo(const GlopParameters& parameters,
2150  const SparseMatrix& deleted_columns,
2151  const SparseMatrix& deleted_rows,
2152  ProblemSolution* solution) const {
2153  switch (type_) {
2154  case SINGLETON_ROW:
2155  SingletonRowUndo(deleted_columns, solution);
2156  break;
2157  case ZERO_COST_SINGLETON_COLUMN:
2158  ZeroCostSingletonColumnUndo(parameters, deleted_rows, solution);
2159  break;
2160  case SINGLETON_COLUMN_IN_EQUALITY:
2161  SingletonColumnInEqualityUndo(parameters, deleted_rows, solution);
2162  break;
2163  case MAKE_CONSTRAINT_AN_EQUALITY:
2164  MakeConstraintAnEqualityUndo(solution);
2165  break;
2166  }
2167 }
2168 
2169 void SingletonPreprocessor::DeleteSingletonRow(MatrixEntry e,
2170  LinearProgram* lp) {
2171  Fractional implied_lower_bound =
2172  lp->constraint_lower_bounds()[e.row] / e.coeff;
2173  Fractional implied_upper_bound =
2174  lp->constraint_upper_bounds()[e.row] / e.coeff;
2175  if (e.coeff < 0.0) {
2176  std::swap(implied_lower_bound, implied_upper_bound);
2177  }
2178 
2179  const Fractional old_lower_bound = lp->variable_lower_bounds()[e.col];
2180  const Fractional old_upper_bound = lp->variable_upper_bounds()[e.col];
2181 
2182  const Fractional potential_error =
2183  std::abs(parameters_.preprocessor_zero_tolerance() / e.coeff);
2184  Fractional new_lower_bound =
2185  implied_lower_bound - potential_error > old_lower_bound
2186  ? implied_lower_bound
2187  : old_lower_bound;
2188  Fractional new_upper_bound =
2189  implied_upper_bound + potential_error < old_upper_bound
2190  ? implied_upper_bound
2191  : old_upper_bound;
2192 
2193  if (new_upper_bound < new_lower_bound) {
2194  if (!IsSmallerWithinFeasibilityTolerance(new_lower_bound,
2195  new_upper_bound)) {
2196  VLOG(1) << "Problem ProblemStatus::INFEASIBLE_OR_UNBOUNDED, singleton "
2197  "row causes the bound of the variable "
2198  << e.col << " to be infeasible by "
2199  << new_lower_bound - new_upper_bound;
2200  status_ = ProblemStatus::INFEASIBLE_OR_UNBOUNDED;
2201  return;
2202  }
2203  // Otherwise, fix the variable to one of its bounds.
2204  if (new_lower_bound == lp->variable_lower_bounds()[e.col]) {
2205  new_upper_bound = new_lower_bound;
2206  }
2207  if (new_upper_bound == lp->variable_upper_bounds()[e.col]) {
2208  new_lower_bound = new_upper_bound;
2209  }
2210  DCHECK_EQ(new_lower_bound, new_upper_bound);
2211  }
2212  row_deletion_helper_.MarkRowForDeletion(e.row);
2213  undo_stack_.push_back(SingletonUndo(SingletonUndo::SINGLETON_ROW, *lp, e,
2214  ConstraintStatus::FREE));
2215  if (deleted_columns_.column(e.col).IsEmpty()) {
2216  deleted_columns_.mutable_column(e.col)->PopulateFromSparseVector(
2217  lp->GetSparseColumn(e.col));
2218  }
2219 
2220  lp->SetVariableBounds(e.col, new_lower_bound, new_upper_bound);
2221 }
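// Illustrative sketch (added by the editor, not part of the original source),
// with hypothetical numbers for DeleteSingletonRow(): a singleton row
// 3*x in [6, 12] implies x in [6/3, 12/3] = [2, 4]. If x previously had
// bounds [0, 10], its bounds are tightened to [2, 4] and the row is deleted;
// with a coefficient of -3 instead, the two ratios would be swapped, giving
// the implied interval [-4, -2].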
2222 
2223 // The dual value of the row needs to be corrected so the solution stays optimal.
2224 void SingletonUndo::SingletonRowUndo(const SparseMatrix& deleted_columns,
2225  ProblemSolution* solution) const {
2226  DCHECK_EQ(0, solution->dual_values[e_.row]);
2227 
2228  // If the variable is basic or free, we can just keep the constraint
2229  // VariableStatus::BASIC and
2230  // 0.0 as the dual value.
2231  const VariableStatus status = solution->variable_statuses[e_.col];
2232  if (status == VariableStatus::BASIC || status == VariableStatus::FREE) return;
2233 
2234  // Compute whether or not the variable bounds changed.
2235  Fractional implied_lower_bound = constraint_lower_bound_ / e_.coeff;
2236  Fractional implied_upper_bound = constraint_upper_bound_ / e_.coeff;
2237  if (e_.coeff < 0.0) {
2238  std::swap(implied_lower_bound, implied_upper_bound);
2239  }
2240  const bool lower_bound_changed = implied_lower_bound > variable_lower_bound_;
2241  const bool upper_bound_changed = implied_upper_bound < variable_upper_bound_;
2242 
2243  if (!lower_bound_changed && !upper_bound_changed) return;
2244  if (status == VariableStatus::AT_LOWER_BOUND && !lower_bound_changed) return;
2245  if (status == VariableStatus::AT_UPPER_BOUND && !upper_bound_changed) return;
2246 
2247  // This is the reduced cost of the variable before the singleton constraint is
2248  // added back.
2249  const Fractional reduced_cost =
2250  cost_ -
2251  ScalarProduct(solution->dual_values, deleted_columns.column(e_.col));
2252  const Fractional reduced_cost_for_minimization =
2253  is_maximization_ ? -reduced_cost : reduced_cost;
2254 
2255  if (status == VariableStatus::FIXED_VALUE) {
2256  DCHECK(lower_bound_changed || upper_bound_changed);
2257  if (reduced_cost_for_minimization >= 0.0 && !lower_bound_changed) {
2258  solution->variable_statuses[e_.col] = VariableStatus::AT_LOWER_BOUND;
2259  return;
2260  }
2261  if (reduced_cost_for_minimization <= 0.0 && !upper_bound_changed) {
2262  solution->variable_statuses[e_.col] = VariableStatus::AT_UPPER_BOUND;
2263  return;
2264  }
2265  }
2266 
2267  // If one of the variable bounds changes, and the variable is no longer at one
2268  // of its bounds, then its reduced cost needs to be set to 0.0 and the
2269  // variable becomes a basic variable. This is what the line below does, since
2270  // the new reduced cost of the variable will be equal to:
2271  // old_reduced_cost - coeff * solution->dual_values[row]
2272  solution->dual_values[e_.row] = reduced_cost / e_.coeff;
2273  ConstraintStatus new_constraint_status = VariableToConstraintStatus(status);
2274  if (status == VariableStatus::FIXED_VALUE &&
2275  (!lower_bound_changed || !upper_bound_changed)) {
2276  new_constraint_status = lower_bound_changed
2277  ? ConstraintStatus::AT_LOWER_BOUND
2278  : ConstraintStatus::AT_UPPER_BOUND;
2279  }
2280  if (e_.coeff < 0.0) {
2281  if (new_constraint_status == ConstraintStatus::AT_LOWER_BOUND) {
2282  new_constraint_status = ConstraintStatus::AT_UPPER_BOUND;
2283  } else if (new_constraint_status == ConstraintStatus::AT_UPPER_BOUND) {
2284  new_constraint_status = ConstraintStatus::AT_LOWER_BOUND;
2285  }
2286  }
2287  solution->variable_statuses[e_.col] = VariableStatus::BASIC;
2288  solution->constraint_statuses[e_.row] = new_constraint_status;
2289 }
2290 
2291 void SingletonPreprocessor::UpdateConstraintBoundsWithVariableBounds(
2292  MatrixEntry e, LinearProgram* lp) {
2293  Fractional lower_delta = -e.coeff * lp->variable_upper_bounds()[e.col];
2294  Fractional upper_delta = -e.coeff * lp->variable_lower_bounds()[e.col];
2295  if (e.coeff < 0.0) {
2296  std::swap(lower_delta, upper_delta);
2297  }
2298  lp->SetConstraintBounds(e.row,
2299  lp->constraint_lower_bounds()[e.row] + lower_delta,
2300  lp->constraint_upper_bounds()[e.row] + upper_delta);
2301 }
2302 
2303 bool SingletonPreprocessor::IntegerSingletonColumnIsRemovable(
2304  const MatrixEntry& matrix_entry, const LinearProgram& lp) const {
2306  DCHECK(lp.IsVariableInteger(matrix_entry.col));
2307  const SparseMatrix& transpose = lp.GetTransposeSparseMatrix();
2308  for (const SparseColumn::Entry entry :
2309  transpose.column(RowToColIndex(matrix_entry.row))) {
2310  // Check if the variable is integer.
2311  if (!lp.IsVariableInteger(RowToColIndex(entry.row()))) {
2312  return false;
2313  }
2314 
2315  const Fractional coefficient = entry.coefficient();
2316  const Fractional coefficient_ratio = coefficient / matrix_entry.coeff;
2317  // Check if coefficient_ratio is integer.
2318  if (!IsIntegerWithinTolerance(
2319  coefficient_ratio, parameters_.solution_feasibility_tolerance())) {
2320  return false;
2321  }
2322  }
2323  const Fractional constraint_lb =
2324  lp.constraint_lower_bounds()[matrix_entry.row];
2325  if (IsFinite(constraint_lb)) {
2326  const Fractional lower_bound_ratio = constraint_lb / matrix_entry.coeff;
2327  if (!IsIntegerWithinTolerance(
2328  lower_bound_ratio, parameters_.solution_feasibility_tolerance())) {
2329  return false;
2330  }
2331  }
2332  const Fractional constraint_ub =
2333  lp.constraint_upper_bounds()[matrix_entry.row];
2334  if (IsFinite(constraint_ub)) {
2335  const Fractional upper_bound_ratio = constraint_ub / matrix_entry.coeff;
2336  if (!IsIntegerWithinTolerance(
2337  upper_bound_ratio, parameters_.solution_feasibility_tolerance())) {
2338  return false;
2339  }
2340  }
2341  return true;
2342 }
2343 
2344 void SingletonPreprocessor::DeleteZeroCostSingletonColumn(
2345  const SparseMatrix& transpose, MatrixEntry e, LinearProgram* lp) {
2346  const ColIndex transpose_col = RowToColIndex(e.row);
2347  const SparseColumn& column = transpose.column(transpose_col);
2348  undo_stack_.push_back(SingletonUndo(SingletonUndo::ZERO_COST_SINGLETON_COLUMN,
2349  *lp, e, ConstraintStatus::FREE));
2350  if (deleted_rows_.column(transpose_col).IsEmpty()) {
2351  deleted_rows_.mutable_column(transpose_col)
2352  ->PopulateFromSparseVector(column);
2353  }
2354  UpdateConstraintBoundsWithVariableBounds(e, lp);
2355  column_deletion_helper_.MarkColumnForDeletion(e.col);
2356 }
2357 
2358 // We need to restore the variable value in order to satisfy the constraint.
2359 void SingletonUndo::ZeroCostSingletonColumnUndo(
2360  const GlopParameters& parameters, const SparseMatrix& deleted_rows,
2361  ProblemSolution* solution) const {
2362  // If the variable was fixed, this is easy. Note that this is the only
2363  // possible case if the current constraint status is FIXED.
2364  if (variable_upper_bound_ == variable_lower_bound_) {
2365  solution->primal_values[e_.col] = variable_lower_bound_;
2366  solution->variable_statuses[e_.col] = VariableStatus::FIXED_VALUE;
2367  return;
2368  }
2369 
2370  const ConstraintStatus ct_status = solution->constraint_statuses[e_.row];
2372  if (ct_status == ConstraintStatus::AT_LOWER_BOUND ||
2373  ct_status == ConstraintStatus::AT_UPPER_BOUND) {
2374  if ((ct_status == ConstraintStatus::AT_UPPER_BOUND && e_.coeff > 0.0) ||
2375  (ct_status == ConstraintStatus::AT_LOWER_BOUND && e_.coeff < 0.0)) {
2376  DCHECK(IsFinite(variable_lower_bound_));
2377  solution->primal_values[e_.col] = variable_lower_bound_;
2378  solution->variable_statuses[e_.col] = VariableStatus::AT_LOWER_BOUND;
2379  } else {
2380  DCHECK(IsFinite(variable_upper_bound_));
2381  solution->primal_values[e_.col] = variable_upper_bound_;
2382  solution->variable_statuses[e_.col] = VariableStatus::AT_UPPER_BOUND;
2383  }
2384  if (constraint_upper_bound_ == constraint_lower_bound_) {
2385  solution->constraint_statuses[e_.row] = ConstraintStatus::FIXED_VALUE;
2386  }
2387  return;
2388  }
2389 
2390  // This is the activity of the constraint before the singleton variable is
2391  // added back to it.
2392  const ColIndex row_as_col = RowToColIndex(e_.row);
2393  const Fractional activity =
2394  ScalarProduct(solution->primal_values, deleted_rows.column(row_as_col));
2395 
2396  // First we try to fix the variable at its lower or upper bound and leave the
2397  // constraint VariableStatus::BASIC. Note that we use the same logic as in
2398  // Preprocessor::IsSmallerWithinPreprocessorZeroTolerance() which we can't use
2399  // here because we are not deriving from the Preprocessor class.
2400  const Fractional tolerance = parameters.preprocessor_zero_tolerance();
2401  const auto is_smaller_with_tolerance = [tolerance](Fractional a,
2402  Fractional b) {
2403  return IsSmallerWithinTolerance(a, b, tolerance);
2404  };
2405  if (variable_lower_bound_ != -kInfinity) {
2406  const Fractional activity_at_lb =
2407  activity + e_.coeff * variable_lower_bound_;
2408  if (is_smaller_with_tolerance(constraint_lower_bound_, activity_at_lb) &&
2409  is_smaller_with_tolerance(activity_at_lb, constraint_upper_bound_)) {
2410  solution->primal_values[e_.col] = variable_lower_bound_;
2411  solution->variable_statuses[e_.col] = VariableStatus::AT_LOWER_BOUND;
2412  return;
2413  }
2414  }
2415  if (variable_upper_bound_ != kInfinity) {
2416  const Fractional activity_at_ub =
2417  activity + e_.coeff * variable_upper_bound_;
2418  if (is_smaller_with_tolerance(constraint_lower_bound_, activity_at_ub) &&
2419  is_smaller_with_tolerance(activity_at_ub, constraint_upper_bound_)) {
2420  solution->primal_values[e_.col] = variable_upper_bound_;
2421  solution->variable_statuses[e_.col] = VariableStatus::AT_UPPER_BOUND;
2422  return;
2423  }
2424  }
2425 
2426  // If the current constraint is UNBOUNDED, then the variable is too
2427  // because of the two cases above. We just set its status to
2428  // VariableStatus::FREE.
2429  if (constraint_lower_bound_ == -kInfinity &&
2430  constraint_upper_bound_ == kInfinity) {
2431  solution->primal_values[e_.col] = 0.0;
2432  solution->variable_statuses[e_.col] = VariableStatus::FREE;
2433  return;
2434  }
2435 
2436  // If the previous cases didn't apply, the constraint will be fixed to its
2437  // bounds and the variable will be made VariableStatus::BASIC.
2438  solution->variable_statuses[e_.col] = VariableStatus::BASIC;
2439  if (constraint_lower_bound_ == constraint_upper_bound_) {
2440  solution->primal_values[e_.col] =
2441  (constraint_lower_bound_ - activity) / e_.coeff;
2442  solution->constraint_statuses[e_.row] = ConstraintStatus::FIXED_VALUE;
2443  return;
2444  }
2445 
2446  bool set_constraint_to_lower_bound;
2447  if (constraint_lower_bound_ == -kInfinity) {
2448  set_constraint_to_lower_bound = false;
2449  } else if (constraint_upper_bound_ == kInfinity) {
2450  set_constraint_to_lower_bound = true;
2451  } else {
2452  // In this case we select the value that is the most inside the variable
2453  // bound.
2454  const Fractional to_lb = (constraint_lower_bound_ - activity) / e_.coeff;
2455  const Fractional to_ub = (constraint_upper_bound_ - activity) / e_.coeff;
2456  set_constraint_to_lower_bound =
2457  std::max(variable_lower_bound_ - to_lb, to_lb - variable_upper_bound_) <
2458  std::max(variable_lower_bound_ - to_ub, to_ub - variable_upper_bound_);
2459  }
2460 
2461  if (set_constraint_to_lower_bound) {
2462  solution->primal_values[e_.col] =
2463  (constraint_lower_bound_ - activity) / e_.coeff;
2464  solution->constraint_statuses[e_.row] = ConstraintStatus::AT_LOWER_BOUND;
2465  } else {
2466  solution->primal_values[e_.col] =
2467  (constraint_upper_bound_ - activity) / e_.coeff;
2468  solution->constraint_statuses[e_.row] = ConstraintStatus::AT_UPPER_BOUND;
2469  }
2470 }
2471 
2472 void SingletonPreprocessor::DeleteSingletonColumnInEquality(
2473  const SparseMatrix& transpose, MatrixEntry e, LinearProgram* lp) {
2474  // Save information for the undo.
2475  const ColIndex transpose_col = RowToColIndex(e.row);
2476  const SparseColumn& row_as_column = transpose.column(transpose_col);
2477  undo_stack_.push_back(
2478  SingletonUndo(SingletonUndo::SINGLETON_COLUMN_IN_EQUALITY, *lp, e,
2479  ConstraintStatus::FREE));
2480  deleted_rows_.mutable_column(transpose_col)
2481  ->PopulateFromSparseVector(row_as_column);
2482 
2483  // Update the objective function using the equality constraint. We have
2484  // v_col*coeff + expression = rhs,
2485  // so the contribution of this variable to the cost function (v_col * cost)
2486  // can be rewritten as:
2487  // (rhs * cost - expression * cost) / coeff.
2488  const Fractional rhs = lp->constraint_upper_bounds()[e.row];
2489  const Fractional cost = lp->objective_coefficients()[e.col];
2490  const Fractional multiplier = cost / e.coeff;
2491  lp->SetObjectiveOffset(lp->objective_offset() + rhs * multiplier);
2492  for (const SparseColumn::Entry e : row_as_column) {
2493  const ColIndex col = RowToColIndex(e.row());
2494  if (!column_deletion_helper_.IsColumnMarked(col)) {
2495  Fractional new_cost =
2496  lp->objective_coefficients()[col] - e.coefficient() * multiplier;
2497 
2498  // TODO(user): It is important to avoid having non-zero costs which are
2499  // the result of numerical error. This is because we still miss some
2500  // tolerances in a few preprocessors. For example, an empty column with a
2501  // cost of 1e-17 that is unbounded towards infinity currently implies that
2502  // the problem is unbounded. This will need fixing.
2503  if (std::abs(new_cost) < parameters_.preprocessor_zero_tolerance()) {
2504  new_cost = 0.0;
2505  }
2506  lp->SetObjectiveCoefficient(col, new_cost);
2507  }
2508  }
2509 
2510  // Now delete the column like a singleton column without cost.
2511  UpdateConstraintBoundsWithVariableBounds(e, lp);
2512  column_deletion_helper_.MarkColumnForDeletion(e.col);
2513 }
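// Illustrative sketch (added by the editor, not part of the original source),
// with hypothetical numbers for the objective update above: for the equality
// 2*x + y = 10 with cost(x) = 4, the multiplier is 4 / 2 = 2, so the
// objective offset increases by 10 * 2 = 20 and cost(y) decreases by
// 1 * 2 = 2, which is exactly the substitution x = (10 - y) / 2 applied to
// the term 4 * x.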
2514 
2515 void SingletonUndo::SingletonColumnInEqualityUndo(
2516  const GlopParameters& parameters, const SparseMatrix& deleted_rows,
2517  ProblemSolution* solution) const {
2518  // First do the same as a zero-cost singleton column.
2519  ZeroCostSingletonColumnUndo(parameters, deleted_rows, solution);
2520 
2521  // Then, restore the dual optimal value taking into account the cost
2522  // modification.
2523  solution->dual_values[e_.row] += cost_ / e_.coeff;
2524  if (solution->constraint_statuses[e_.row] == ConstraintStatus::BASIC) {
2525  solution->variable_statuses[e_.col] = VariableStatus::BASIC;
2526  solution->constraint_statuses[e_.row] = ConstraintStatus::FIXED_VALUE;
2527  }
2528 }
2529 
2530 void SingletonUndo::MakeConstraintAnEqualityUndo(
2531  ProblemSolution* solution) const {
2532  if (solution->constraint_statuses[e_.row] == ConstraintStatus::FIXED_VALUE) {
2533  solution->constraint_statuses[e_.row] = constraint_status_;
2534  }
2535 }
2536 
2537 bool SingletonPreprocessor::MakeConstraintAnEqualityIfPossible(
2538  const SparseMatrix& transpose, MatrixEntry e, LinearProgram* lp) {
2539  // TODO(user): We could skip early if the relevant constraint bound is
2540  // infinity.
2541  const Fractional cst_lower_bound = lp->constraint_lower_bounds()[e.row];
2542  const Fractional cst_upper_bound = lp->constraint_upper_bounds()[e.row];
2543  if (cst_lower_bound == cst_upper_bound) return true;
2544 
2545  // To be efficient, we only process a row once and cache the domain that an
2546  // "artificial" extra variable x with coefficient 1.0 could take while still
2547  // making the constraint feasible. The domain bounds for the constraint e.row
2548  // will be stored in row_lb_sum_[e.row] and row_ub_sum_[e.row].
2549  const DenseRow& variable_ubs = lp->variable_upper_bounds();
2550  const DenseRow& variable_lbs = lp->variable_lower_bounds();
2551  if (e.row >= row_sum_is_cached_.size() || !row_sum_is_cached_[e.row]) {
2552  if (e.row >= row_sum_is_cached_.size()) {
2553  const int new_size = e.row.value() + 1;
2554  row_sum_is_cached_.resize(new_size);
2555  row_lb_sum_.resize(new_size);
2556  row_ub_sum_.resize(new_size);
2557  }
2558  row_sum_is_cached_[e.row] = true;
2559  row_lb_sum_[e.row].Add(cst_lower_bound);
2560  row_ub_sum_[e.row].Add(cst_upper_bound);
2561  for (const SparseColumn::Entry entry :
2562  transpose.column(RowToColIndex(e.row))) {
2563  const ColIndex row_as_col = RowToColIndex(entry.row());
2564 
2565  // Tricky: Even if later more columns are deleted, these "cached" sums
2566  // will actually still be valid because we only delete columns in a
2567  // compatible way.
2568  //
2569  // TODO(user): Find a more robust way? it seems easy to add new deletion
2570  // rules that may break this assumption.
2571  if (column_deletion_helper_.IsColumnMarked(row_as_col)) continue;
2572  if (entry.coefficient() > 0.0) {
2573  row_lb_sum_[e.row].Add(-entry.coefficient() * variable_ubs[row_as_col]);
2574  row_ub_sum_[e.row].Add(-entry.coefficient() * variable_lbs[row_as_col]);
2575  } else {
2576  row_lb_sum_[e.row].Add(-entry.coefficient() * variable_lbs[row_as_col]);
2577  row_ub_sum_[e.row].Add(-entry.coefficient() * variable_ubs[row_as_col]);
2578  }
2579 
2580  // TODO(user): Abort early if both sums contain more than 1 infinity?
2581  }
2582  }
2583 
2584  // Now that the lb/ub sum for the row is cached, we can use it to compute the
2585  // implied bounds on the variable from this constraint and the other
2586  // variables.
2587  const Fractional c = e.coeff;
2588  const Fractional lb =
2589  c > 0.0 ? row_lb_sum_[e.row].SumWithout(-c * variable_ubs[e.col]) / c
2590  : row_ub_sum_[e.row].SumWithout(-c * variable_ubs[e.col]) / c;
2591  const Fractional ub =
2592  c > 0.0 ? row_ub_sum_[e.row].SumWithout(-c * variable_lbs[e.col]) / c
2593  : row_lb_sum_[e.row].SumWithout(-c * variable_lbs[e.col]) / c;
2594 
2595  // Note that we could do the same for singleton variables with a cost of
2596  // 0.0, but such variables are already dealt with by
2597  // DeleteZeroCostSingletonColumn() so there is no point.
2598  const Fractional cost =
2599  lp->GetObjectiveCoefficientForMinimizationVersion(e.col);
2600  DCHECK_NE(cost, 0.0);
2601 
2602  // Note that some of the tests below will always be true if the bounds of
2603  // the column of index col are infinite. This is the desired behavior.
2604  ConstraintStatus relaxed_status = ConstraintStatus::FREE;
2605  if (cost < 0.0 && IsSmallerWithinPreprocessorZeroTolerance(
2606  ub, lp->variable_upper_bounds()[e.col])) {
2607  if (e.coeff > 0) {
2608  if (cst_upper_bound == kInfinity) {
2609  status_ = ProblemStatus::INFEASIBLE_OR_UNBOUNDED;
2610  } else {
2611  relaxed_status = ConstraintStatus::AT_UPPER_BOUND;
2612  lp->SetConstraintBounds(e.row, cst_upper_bound, cst_upper_bound);
2613  }
2614  } else {
2615  if (cst_lower_bound == -kInfinity) {
2616  status_ = ProblemStatus::INFEASIBLE_OR_UNBOUNDED;
2617  } else {
2618  relaxed_status = ConstraintStatus::AT_LOWER_BOUND;
2619  lp->SetConstraintBounds(e.row, cst_lower_bound, cst_lower_bound);
2620  }
2621  }
2622 
2623  if (status_ == ProblemStatus::INFEASIBLE_OR_UNBOUNDED) {
2624  DCHECK_EQ(ub, kInfinity);
2625  VLOG(1) << "Problem ProblemStatus::INFEASIBLE_OR_UNBOUNDED, singleton "
2626  "variable "
2627  << e.col << " has a cost (for minimization) of " << cost
2628  << " and is unbounded towards kInfinity.";
2629  return false;
2630  }
2631 
2632  // This is important but tricky: The upper bound of the variable needs to
2633  // be relaxed. This is valid because the implied bound is lower than the
2634  // original upper bound here. This is needed, so that the optimal
2635  // primal/dual values of the new problem will also be optimal of the
2636  // original one.
2637  //
2638  // Let's prove the case coeff > 0.0 for a minimization problem. In the new
2639  // problem, because the variable is unbounded towards +infinity, its
2640  // reduced cost must satisfy at optimality rc = cost - coeff * dual_v >=
2641  // 0. But this implies dual_v <= cost / coeff <= 0. This is exactly what
2642  // is needed for the optimality of the initial problem since the
2643  // constraint will be at its upper bound, and the corresponding slack
2644  // condition is that the dual value needs to be <= 0.
2645  lp->SetVariableBounds(e.col, lp->variable_lower_bounds()[e.col], kInfinity);
2646  }
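// Illustrative sketch (added by the editor, not part of the original source),
// instantiating the proof above with hypothetical numbers: take coeff = 2 and
// a minimization cost of -1. After the upper bound of the variable is relaxed
// to +infinity, dual feasibility requires rc = -1 - 2 * dual_v >= 0, hence
// dual_v <= -0.5 <= 0, which is precisely the sign condition required for a
// constraint that is tight at its upper bound.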
2647  if (cost > 0.0 && IsSmallerWithinPreprocessorZeroTolerance(
2648  lp->variable_lower_bounds()[e.col], lb)) {
2649  if (e.coeff > 0) {
2650  if (cst_lower_bound == -kInfinity) {
2651  status_ = ProblemStatus::INFEASIBLE_OR_UNBOUNDED;
2652  } else {
2653  relaxed_status = ConstraintStatus::AT_LOWER_BOUND;
2654  lp->SetConstraintBounds(e.row, cst_lower_bound, cst_lower_bound);
2655  }
2656  } else {
2657  if (cst_upper_bound == kInfinity) {
2658  status_ = ProblemStatus::INFEASIBLE_OR_UNBOUNDED;
2659  } else {
2660  relaxed_status = ConstraintStatus::AT_UPPER_BOUND;
2661  lp->SetConstraintBounds(e.row, cst_upper_bound, cst_upper_bound);
2662  }
2663  }
2664 
2665  if (status_ == ProblemStatus::INFEASIBLE_OR_UNBOUNDED) {
2666  DCHECK_EQ(lb, -kInfinity);
2667  VLOG(1) << "Problem ProblemStatus::INFEASIBLE_OR_UNBOUNDED, singleton "
2668  "variable "
2669  << e.col << " has a cost (for minimization) of " << cost
2670  << " and is unbounded towards -kInfinity.";
2671  return false;
2672  }
2673 
2674  // Same remark as above for a lower bounded variable this time.
2675  lp->SetVariableBounds(e.col, -kInfinity,
2676  lp->variable_upper_bounds()[e.col]);
2677  }
2678 
2679  if (lp->constraint_lower_bounds()[e.row] ==
2680  lp->constraint_upper_bounds()[e.row]) {
2681  undo_stack_.push_back(SingletonUndo(
2682  SingletonUndo::MAKE_CONSTRAINT_AN_EQUALITY, *lp, e, relaxed_status));
2683  return true;
2684  }
2685  return false;
2686 }
2687 
2688  bool SingletonPreprocessor::Run(LinearProgram* lp) {
2690  RETURN_VALUE_IF_NULL(lp, false);
2691  const SparseMatrix& matrix = lp->GetSparseMatrix();
2692  const SparseMatrix& transpose = lp->GetTransposeSparseMatrix();
2693 
2694  // Initialize column_to_process with the current singleton columns.
2695  ColIndex num_cols(matrix.num_cols());
2696  RowIndex num_rows(matrix.num_rows());
2697  deleted_columns_.PopulateFromZero(num_rows, num_cols);
2698  deleted_rows_.PopulateFromZero(ColToRowIndex(num_cols),
2699  RowToColIndex(num_rows));
2700  StrictITIVector<ColIndex, EntryIndex> column_degree(num_cols, EntryIndex(0));
2701  std::vector<ColIndex> column_to_process;
2702  for (ColIndex col(0); col < num_cols; ++col) {
2703  column_degree[col] = matrix.column(col).num_entries();
2704  if (column_degree[col] == 1) {
2705  column_to_process.push_back(col);
2706  }
2707  }
2708 
2709  // Initialize row_to_process with the current singleton rows.
2710  StrictITIVector<RowIndex, EntryIndex> row_degree(num_rows, EntryIndex(0));
2711  std::vector<RowIndex> row_to_process;
2712  for (RowIndex row(0); row < num_rows; ++row) {
2713  row_degree[row] = transpose.column(RowToColIndex(row)).num_entries();
2714  if (row_degree[row] == 1) {
2715  row_to_process.push_back(row);
2716  }
2717  }
2718 
2719  // Process current singleton rows/columns and enqueue new ones.
2720  while (status_ == ProblemStatus::INIT &&
2721  (!column_to_process.empty() || !row_to_process.empty())) {
2722  while (status_ == ProblemStatus::INIT && !column_to_process.empty()) {
2723  const ColIndex col = column_to_process.back();
2724  column_to_process.pop_back();
2725  if (column_degree[col] <= 0) continue;
2726  const MatrixEntry e = GetSingletonColumnMatrixEntry(col, matrix);
2727  if (in_mip_context_ && lp->IsVariableInteger(e.col) &&
2728  !IntegerSingletonColumnIsRemovable(e, *lp)) {
2729  continue;
2730  }
2731 
2732  // TODO(user): It seems better to process all the singleton columns with
2733  // a cost of zero first.
2734  if (lp->objective_coefficients()[col] == 0.0) {
2735  DeleteZeroCostSingletonColumn(transpose, e, lp);
2736  } else if (MakeConstraintAnEqualityIfPossible(transpose, e, lp)) {
2737  DeleteSingletonColumnInEquality(transpose, e, lp);
2738  } else {
2739  continue;
2740  }
2741  --row_degree[e.row];
2742  if (row_degree[e.row] == 1) {
2743  row_to_process.push_back(e.row);
2744  }
2745  }
2746  while (status_ == ProblemStatus::INIT && !row_to_process.empty()) {
2747  const RowIndex row = row_to_process.back();
2748  row_to_process.pop_back();
2749  if (row_degree[row] <= 0) continue;
2750  const MatrixEntry e = GetSingletonRowMatrixEntry(row, transpose);
2751 
2752  DeleteSingletonRow(e, lp);
2753  --column_degree[e.col];
2754  if (column_degree[e.col] == 1) {
2755  column_to_process.push_back(e.col);
2756  }
2757  }
2758  }
2759 
2760  if (status_ != ProblemStatus::INIT) return false;
2761  lp->DeleteColumns(column_deletion_helper_.GetMarkedColumns());
2762  lp->DeleteRows(row_deletion_helper_.GetMarkedRows());
2763  return !column_deletion_helper_.IsEmpty() || !row_deletion_helper_.IsEmpty();
2764 }
2765 
2766  void SingletonPreprocessor::RecoverSolution(ProblemSolution* solution) const {
2768  RETURN_IF_NULL(solution);
2769 
2770  // Note that the two deletion helpers must restore 0.0 values in the positions
2771  // that will be used during Undo(). That is, all the calls done by this class
2772  // to MarkColumnForDeletion() should be done with 0.0 as the value to restore
2773  // (which is already the case when using MarkRowForDeletion()).
2774  // This is important because the various Undo() functions assume that a
2775  // primal/dual variable value which isn't restored yet has the value of 0.0.
2776  column_deletion_helper_.RestoreDeletedColumns(solution);
2777  row_deletion_helper_.RestoreDeletedRows(solution);
2778 
2779  // It is important to undo the operations in the correct order, i.e. in the
2780  // reverse order in which they were done.
2781  for (int i = undo_stack_.size() - 1; i >= 0; --i) {
2782  undo_stack_[i].Undo(parameters_, deleted_columns_, deleted_rows_, solution);
2783  }
2784 }
2785 
2786 MatrixEntry SingletonPreprocessor::GetSingletonColumnMatrixEntry(
2787  ColIndex col, const SparseMatrix& matrix) {
2788  for (const SparseColumn::Entry e : matrix.column(col)) {
2789  if (!row_deletion_helper_.IsRowMarked(e.row())) {
2790  DCHECK_NE(0.0, e.coefficient());
2791  return MatrixEntry(e.row(), col, e.coefficient());
2792  }
2793  }
2794  // This shouldn't happen.
2795  LOG(DFATAL) << "No unmarked entry in a column that is supposed to have one.";
2796  status_ = ProblemStatus::ABNORMAL;
2797  return MatrixEntry(RowIndex(0), ColIndex(0), 0.0);
2798 }
2799 
2800 MatrixEntry SingletonPreprocessor::GetSingletonRowMatrixEntry(
2801  RowIndex row, const SparseMatrix& transpose) {
2802  for (const SparseColumn::Entry e : transpose.column(RowToColIndex(row))) {
2803  const ColIndex col = RowToColIndex(e.row());
2804  if (!column_deletion_helper_.IsColumnMarked(col)) {
2805  DCHECK_NE(0.0, e.coefficient());
2806  return MatrixEntry(row, col, e.coefficient());
2807  }
2808  }
2809  // This shouldn't happen.
2810  LOG(DFATAL) << "No unmarked entry in a row that is supposed to have one.";
2811  status_ = ProblemStatus::ABNORMAL;
2812  return MatrixEntry(RowIndex(0), ColIndex(0), 0.0);
2813 }
2814 
2815 // --------------------------------------------------------
2816 // RemoveNearZeroEntriesPreprocessor
2817 // --------------------------------------------------------
2818 
2819  bool RemoveNearZeroEntriesPreprocessor::Run(LinearProgram* lp) {
2821  RETURN_VALUE_IF_NULL(lp, false);
2822  const ColIndex num_cols = lp->num_variables();
2823  if (num_cols == 0) return false;
2824 
2825  // We will use a different threshold for each row depending on its degree.
2826  // We use Fractionals for convenience since they will be used as such below.
2827  const RowIndex num_rows = lp->num_constraints();
2828  DenseColumn row_degree(num_rows, 0.0);
2829  Fractional num_non_zero_objective_coefficients = 0.0;
2830  for (ColIndex col(0); col < num_cols; ++col) {
2831  for (const SparseColumn::Entry e : lp->GetSparseColumn(col)) {
2832  row_degree[e.row()] += 1.0;
2833  }
2834  if (lp->objective_coefficients()[col] != 0.0) {
2835  num_non_zero_objective_coefficients += 1.0;
2836  }
2837  }
2838 
2839  // To not have too many parameters, we use the preprocessor_zero_tolerance.
2840  const Fractional allowed_impact = parameters_.preprocessor_zero_tolerance();
2841 
2842  // TODO(user): Our criteria ensure that during presolve a primal feasible
2843  // solution will stay primal feasible. However, we have no guarantee on the
2844  // dual-feasibility (because the range of the dual variable values is not
2845  // taken into account). Fix that? Or find a better criterion, since on all
2846  // our current problems this preprocessor helps and doesn't introduce errors.
2847  const EntryIndex initial_num_entries = lp->num_entries();
2848  int num_zeroed_objective_coefficients = 0;
2849  for (ColIndex col(0); col < num_cols; ++col) {
2850  const Fractional lower_bound = lp->variable_lower_bounds()[col];
2851  const Fractional upper_bound = lp->variable_upper_bounds()[col];
2852 
2853  // TODO(user): Write a small class that takes a matrix, its transpose, row
2854  // and column bounds, and "propagate" the bounds as much as possible so we
2855  // can use this better estimate here and remove more near-zero entries.
2856  const Fractional max_magnitude =
2857  std::max(std::abs(lower_bound), std::abs(upper_bound));
2858  if (max_magnitude == kInfinity || max_magnitude == 0) continue;
2859  const Fractional threshold = allowed_impact / max_magnitude;
2860  lp->GetMutableSparseColumn(col)->RemoveNearZeroEntriesWithWeights(
2861  threshold, row_degree);
2862 
2863  if (lp->objective_coefficients()[col] != 0.0 &&
2864  num_non_zero_objective_coefficients *
2865  std::abs(lp->objective_coefficients()[col]) <
2866  threshold) {
2867  lp->SetObjectiveCoefficient(col, 0.0);
2868  ++num_zeroed_objective_coefficients;
2869  }
2870  }
2871 
2872  const EntryIndex num_entries = lp->num_entries();
2873  if (num_entries != initial_num_entries) {
2874  VLOG(1) << "Removed " << initial_num_entries - num_entries
2875  << " near-zero entries.";
2876  }
2877  if (num_zeroed_objective_coefficients > 0) {
2878  VLOG(1) << "Removed " << num_zeroed_objective_coefficients
2879  << " near-zero objective coefficients.";
2880  }
2881 
2882  // No post-solve is required.
2883  return false;
2884 }
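// Illustrative sketch (standalone, plain doubles; not glop's actual types or
// API): the zeroing rule above drops an objective coefficient only when
// num_non_zero_objective_coefficients * |coeff| stays below
// allowed_impact / max(|lb|, |ub|), so that even if every such coefficient
// were zeroed, the objective would change by less than allowed_impact.
#include <algorithm>
#include <cassert>
#include <cmath>

int main() {
  const double allowed_impact = 1e-9;  // Plays the role of
                                       // preprocessor_zero_tolerance().
  // One variable with |x| <= 100 and a tiny objective coefficient.
  const double lower_bound = -100.0, upper_bound = 100.0;
  const double max_magnitude =
      std::max(std::abs(lower_bound), std::abs(upper_bound));
  const double threshold = allowed_impact / max_magnitude;  // 1e-11

  const double num_non_zero_objective_coefficients = 4.0;
  const double coeff = 1e-13;
  const bool drop =
      num_non_zero_objective_coefficients * std::abs(coeff) < threshold;
  assert(drop);  // 4e-13 < 1e-11, so the coefficient is zeroed.
  // Worst-case objective change if all 4 such coefficients are zeroed:
  // 4 * 1e-13 * 100 = 4e-11, which is below allowed_impact.
  assert(num_non_zero_objective_coefficients * std::abs(coeff) *
             max_magnitude <
         allowed_impact);
  return 0;
}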
2885 
2886 void RemoveNearZeroEntriesPreprocessor::RecoverSolution(
2887  ProblemSolution* solution) const {}
2888 
2889 // --------------------------------------------------------
2890 // SingletonColumnSignPreprocessor
2891 // --------------------------------------------------------
2892 
2893 bool SingletonColumnSignPreprocessor::Run(LinearProgram* lp) {
2894  SCOPED_INSTRUCTION_COUNT(time_limit_);
2895  RETURN_VALUE_IF_NULL(lp, false);
2896  const ColIndex num_cols = lp->num_variables();
2897  if (num_cols == 0) return false;
2898 
2899  changed_columns_.clear();
2900  int num_singletons = 0;
2901  for (ColIndex col(0); col < num_cols; ++col) {
2902  SparseColumn* sparse_column = lp->GetMutableSparseColumn(col);
2903  const Fractional cost = lp->objective_coefficients()[col];
2904  if (sparse_column->num_entries() == 1) {
2905  ++num_singletons;
2906  }
2907  if (sparse_column->num_entries() == 1 &&
2908  sparse_column->GetFirstCoefficient() < 0) {
2909  sparse_column->MultiplyByConstant(-1.0);
2910  lp->SetVariableBounds(col, -lp->variable_upper_bounds()[col],
2911  -lp->variable_lower_bounds()[col]);
2912  lp->SetObjectiveCoefficient(col, -cost);
2913  changed_columns_.push_back(col);
2914  }
2915  }
2916  VLOG(1) << "Changed the sign of " << changed_columns_.size() << " columns.";
2917  VLOG(1) << num_singletons << " singleton columns left.";
2918  return !changed_columns_.empty();
2919 }
2920 
2920 
2921 void SingletonColumnSignPreprocessor::RecoverSolution(
2922  ProblemSolution* solution) const {
2923  SCOPED_INSTRUCTION_COUNT(time_limit_);
2924  RETURN_IF_NULL(solution);
2925  for (int i = 0; i < changed_columns_.size(); ++i) {
2926  const ColIndex col = changed_columns_[i];
2927  solution->primal_values[col] = -solution->primal_values[col];
2928  const VariableStatus status = solution->variable_statuses[col];
2929  if (status == VariableStatus::AT_UPPER_BOUND) {
2930  solution->variable_statuses[col] = VariableStatus::AT_LOWER_BOUND;
2931  } else if (status == VariableStatus::AT_LOWER_BOUND) {
2932  solution->variable_statuses[col] = VariableStatus::AT_UPPER_BOUND;
2933  }
2934  }
2935 }
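// Illustrative sketch (standalone, plain doubles; not glop's actual types):
// the flip done by this preprocessor negates the single matrix entry, swaps
// and negates the variable bounds, and negates the cost; postsolve then
// negates the primal value back and exchanges AT_LOWER_BOUND/AT_UPPER_BOUND.
#include <cassert>
#include <utility>

struct ToyColumn {
  double coefficient;  // The single nonzero entry of the singleton column.
  double lower_bound;
  double upper_bound;
  double cost;
};

// Presolve step: substitute x' = -x so the single coefficient becomes positive.
void FlipSign(ToyColumn* col) {
  col->coefficient = -col->coefficient;
  std::swap(col->lower_bound, col->upper_bound);
  col->lower_bound = -col->lower_bound;
  col->upper_bound = -col->upper_bound;
  col->cost = -col->cost;
}

int main() {
  ToyColumn col = {/*coefficient=*/-3.0, /*lower_bound=*/-5.0,
                   /*upper_bound=*/2.0, /*cost=*/4.0};
  FlipSign(&col);
  assert(col.coefficient == 3.0);
  assert(col.lower_bound == -2.0 && col.upper_bound == 5.0);
  assert(col.cost == -4.0);
  // Postsolve: a recovered value x' maps back to x = -x'.
  const double flipped_value = 1.5;
  const double original_value = -flipped_value;
  assert(original_value == -1.5);
  return 0;
}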
2936 
2937 // --------------------------------------------------------
2938 // DoubletonEqualityRowPreprocessor
2939 // --------------------------------------------------------
2940 
2941 bool DoubletonEqualityRowPreprocessor::Run(LinearProgram* lp) {
2942  SCOPED_INSTRUCTION_COUNT(time_limit_);
2943  RETURN_VALUE_IF_NULL(lp, false);
2944 
2945  // This is needed at postsolve.
2946  //
2947  // TODO(user): Get rid of the FIXED status instead to avoid spending
2948  // time/memory for no good reason here.
2949  saved_row_lower_bounds_ = lp->constraint_lower_bounds();
2950  saved_row_upper_bounds_ = lp->constraint_upper_bounds();
2951 
2952  // Note that we don't update the transpose during this preprocessor run.
2953  const SparseMatrix& original_transpose = lp->GetTransposeSparseMatrix();
2954 
2955  // Iterate over the rows that were already doubletons before this preprocessor
2956  // run, and whose items don't belong to a column that we deleted during this
2957  // run. This implies that the rows are only ever touched once per run, because
2958  // we only modify rows that have an item on a deleted column.
2959  const RowIndex num_rows(lp->num_constraints());
2960  for (RowIndex row(0); row < num_rows; ++row) {
2961  const SparseColumn& original_row =
2962  original_transpose.column(RowToColIndex(row));
2963  if (original_row.num_entries() != 2 ||
2964  lp->constraint_lower_bounds()[row] !=
2965  lp->constraint_upper_bounds()[row]) {
2966  continue;
2967  }
2968 
2969  // Collect the two row items. Skip the ones involving a deleted column.
2970  // Note: we filled r.col[] and r.coeff[] by item order, and currently we
2971  // always pick the first column as the to-be-deleted one.
2972  // TODO(user): make a smarter choice of which column to delete, and
2973  // swap col[] and coeff[] accordingly.
2974  RestoreInfo r; // Use a short name since we're using it everywhere.
2975  int entry_index = 0;
2976  for (const SparseColumn::Entry e : original_row) {
2977  const ColIndex col = RowToColIndex(e.row());
2978  if (column_deletion_helper_.IsColumnMarked(col)) continue;
2979  r.col[entry_index] = col;
2980  r.coeff[entry_index] = e.coefficient();
2981  DCHECK_NE(0.0, r.coeff[entry_index]);
2982  ++entry_index;
2983  }
2984 
2985  // Discard some cases that will be treated by other preprocessors, or by
2986  // another run of this one.
2987  // 1) One or two of the items were in a deleted column.
2988  if (entry_index < 2) continue;
2989 
2990  // Fill the RestoreInfo, even if we end up not using it (because we
2991  // give up on preprocessing this row): it has a bunch of handy shortcuts.
2992  r.row = row;
2993  r.rhs = lp->constraint_lower_bounds()[row];
2994  for (int col_choice = 0; col_choice < NUM_DOUBLETON_COLS; ++col_choice) {
2995  const ColIndex col = r.col[col_choice];
2996  r.lb[col_choice] = lp->variable_lower_bounds()[col];
2997  r.ub[col_choice] = lp->variable_upper_bounds()[col];
2998  r.objective_coefficient[col_choice] = lp->objective_coefficients()[col];
2999  r.column[col_choice].PopulateFromSparseVector(lp->GetSparseColumn(col));
3000  }
3001 
3002  // 2) One of the columns is fixed: don't bother, it will be treated
3003  // by the FixedVariablePreprocessor.
3004  if (r.lb[DELETED] == r.ub[DELETED] || r.lb[MODIFIED] == r.ub[MODIFIED]) {
3005  continue;
3006  }
3007 
3008  // Look at the bounds of both variables and exit early if we can delegate
3009  // to another pre-processor; otherwise adjust the bounds of the remaining
3010  // variable as necessary.
3011  // If the current row is: aX + bY = c, then the bounds of Y must be
3012  // adjusted to satisfy Y = c/b + (-a/b)X
3013  //
3014  // Note: when we compute the coefficients of these equations, we can cause
3015  // underflows/overflows that could be avoided if we did the computations
3016  // more carefully; but for now we just treat those cases as
3017  // ProblemStatus::ABNORMAL.
3018  // TODO(user): consider skipping the problematic rows in this preprocessor,
3019  // or trying harder to avoid the under/overflow.
3020  {
3021  const Fractional carry_over_offset = r.rhs / r.coeff[MODIFIED];
3022  const Fractional carry_over_factor =
3023  -r.coeff[DELETED] / r.coeff[MODIFIED];
3024  if (!IsFinite(carry_over_offset) || !IsFinite(carry_over_factor) ||
3025  carry_over_factor == 0.0) {
3026  status_ = ProblemStatus::ABNORMAL;
3027  break;
3028  }
3029 
3030  Fractional lb = r.lb[MODIFIED];
3031  Fractional ub = r.ub[MODIFIED];
3032  Fractional carried_over_lb =
3033  r.lb[DELETED] * carry_over_factor + carry_over_offset;
3034  Fractional carried_over_ub =
3035  r.ub[DELETED] * carry_over_factor + carry_over_offset;
3036  if (carry_over_factor < 0) {
3037  std::swap(carried_over_lb, carried_over_ub);
3038  }
3039  if (carried_over_lb <= lb) {
3040  // Default (and simplest) case: the lower bound didn't change.
3041  r.bound_backtracking_at_lower_bound = RestoreInfo::ColChoiceAndStatus(
3042  MODIFIED, VariableStatus::AT_LOWER_BOUND, lb);
3043  } else {
3044  lb = carried_over_lb;
3045  r.bound_backtracking_at_lower_bound = RestoreInfo::ColChoiceAndStatus(
3046  DELETED,
3047  carry_over_factor > 0 ? VariableStatus::AT_LOWER_BOUND
3048  : VariableStatus::AT_UPPER_BOUND,
3049  carry_over_factor > 0 ? r.lb[DELETED] : r.ub[DELETED]);
3050  }
3051  if (carried_over_ub >= ub) {
3052  // Default (and simplest) case: the upper bound didn't change.
3053  r.bound_backtracking_at_upper_bound = RestoreInfo::ColChoiceAndStatus(
3054  MODIFIED, VariableStatus::AT_UPPER_BOUND, ub);
3055  } else {
3056  ub = carried_over_ub;
3057  r.bound_backtracking_at_upper_bound = RestoreInfo::ColChoiceAndStatus(
3058  DELETED,
3059  carry_over_factor > 0 ? VariableStatus::AT_UPPER_BOUND
3060  : VariableStatus::AT_LOWER_BOUND,
3061  carry_over_factor > 0 ? r.ub[DELETED] : r.lb[DELETED]);
3062  }
3063  // 3) If the new bounds are fixed (the domain is a singleton) or
3064  // infeasible, then we let the
3065  // ForcingAndImpliedFreeConstraintPreprocessor do the work.
3066  if (IsSmallerWithinPreprocessorZeroTolerance(ub, lb)) continue;
3067  lp->SetVariableBounds(r.col[MODIFIED], lb, ub);
3068  }
3069 
3070  restore_stack_.push_back(r);
3071 
3072  // Now, perform the substitution. If the current row is: aX + bY = c
3073  // then any other row containing 'X' with coefficient x can remove the
3074  // entry in X, and instead add an entry on 'Y' with coefficient x(-b/a)
3075  // and a constant offset x(c/a).
3076  // Looking at the matrix, this translates into colY += (-b/a) colX.
3077  DCHECK_NE(r.coeff[DELETED], 0.0);
3078  const Fractional substitution_factor =
3079  -r.coeff[MODIFIED] / r.coeff[DELETED]; // -b/a
3080  const Fractional constant_offset_factor = r.rhs / r.coeff[DELETED]; // c/a
3081  // Again we don't bother too much with over/underflows.
3082  if (!IsFinite(substitution_factor) || substitution_factor == 0.0 ||
3083  !IsFinite(constant_offset_factor)) {
3084  status_ = ProblemStatus::ABNORMAL;
3085  break;
3086  }
3087  r.column[DELETED].AddMultipleToSparseVectorAndDeleteCommonIndex(
3088  substitution_factor, row, parameters_.drop_tolerance(),
3089  lp->GetMutableSparseColumn(r.col[MODIFIED]));
3090 
3091  // Apply similar operations on the objective coefficients.
3092  // Note that the offset is being updated by
3093  // SubtractColumnMultipleFromConstraintBound() below.
3094  {
3095  const Fractional new_objective =
3096  r.objective_coefficient[MODIFIED] +
3097  substitution_factor * r.objective_coefficient[DELETED];
3098  if (std::abs(new_objective) > parameters_.drop_tolerance()) {
3099  lp->SetObjectiveCoefficient(r.col[MODIFIED], new_objective);
3100  } else {
3101  lp->SetObjectiveCoefficient(r.col[MODIFIED], 0.0);
3102  }
3103  }
3104 
3105  // Carry over the constant factor of the substitution as well.
3106  // TODO(user): rename that method to reflect the fact that it also updates
3107  // the objective offset, in the other direction.
3108  SubtractColumnMultipleFromConstraintBound(r.col[DELETED],
3109  constant_offset_factor, lp);
3110 
3111  // Mark the column and the row for deletion.
3112  column_deletion_helper_.MarkColumnForDeletion(r.col[DELETED]);
3113  row_deletion_helper_.MarkRowForDeletion(row);
3114  }
3115  if (status_ != ProblemStatus::INIT) return false;
3116  lp->DeleteColumns(column_deletion_helper_.GetMarkedColumns());
3117  lp->DeleteRows(row_deletion_helper_.GetMarkedRows());
3118  return !column_deletion_helper_.IsEmpty();
3119 }
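// Illustrative sketch (standalone, plain doubles; not glop's API): a worked
// instance of the substitution performed above. From the doubleton equality
// 2X + 4Y = 8 we get X = 4 - 2Y, so another row 3X + 5Y <= 10 becomes
// -Y <= -2 using exactly the -b/a and c/a factors computed in Run().
#include <cassert>

int main() {
  // Doubleton equality row: a*X + b*Y = c, with X the DELETED column.
  const double a = 2.0, b = 4.0, c = 8.0;
  const double substitution_factor = -b / a;    // -2.0, i.e. -b/a
  const double constant_offset_factor = c / a;  //  4.0, i.e.  c/a

  // Another row containing X with coefficient 3: 3*X + 5*Y <= 10.
  const double x_coeff = 3.0, y_coeff = 5.0, row_upper_bound = 10.0;

  // colY += (-b/a) * colX, and the bound absorbs x_coeff * (c/a).
  const double new_y_coeff = y_coeff + x_coeff * substitution_factor;
  const double new_upper_bound =
      row_upper_bound - x_coeff * constant_offset_factor;

  assert(new_y_coeff == -1.0);      // 5 + 3 * (-2)
  assert(new_upper_bound == -2.0);  // 10 - 3 * 4
  return 0;
}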
3120 
3121 void DoubletonEqualityRowPreprocessor::RecoverSolution(
3122  ProblemSolution* solution) const {
3123  SCOPED_INSTRUCTION_COUNT(time_limit_);
3124  RETURN_IF_NULL(solution);
3125  column_deletion_helper_.RestoreDeletedColumns(solution);
3126  row_deletion_helper_.RestoreDeletedRows(solution);
3127  for (const RestoreInfo& r : Reverse(restore_stack_)) {
3128  switch (solution->variable_statuses[r.col[MODIFIED]]) {
3129  case VariableStatus::FIXED_VALUE:
3130  LOG(DFATAL) << "FIXED variable produced by DoubletonPreprocessor!";
3131  // In non-fastbuild mode, we rely on the rest of the code producing an
3132  // ProblemStatus::ABNORMAL status here.
3133  break;
3134  // When the modified variable is either basic or free, we keep it as is,
3135  // and simply make the deleted one basic.
3136  case VariableStatus::FREE:
3137  ABSL_FALLTHROUGH_INTENDED;
3138  case VariableStatus::BASIC:
3139  // Several code paths set the deleted column as basic. The code that
3140  // sets its value in that case is below, after the switch() block.
3141  solution->variable_statuses[r.col[DELETED]] = VariableStatus::BASIC;
3142  break;
3143  case VariableStatus::AT_LOWER_BOUND:
3144  ABSL_FALLTHROUGH_INTENDED;
3145  case VariableStatus::AT_UPPER_BOUND: {
3146  // The bound was induced by a bound of one of the two original
3147  // variables. Put that original variable at its bound, and make
3148  // the other one basic.
3149  const RestoreInfo::ColChoiceAndStatus& bound_backtracking =
3150  solution->variable_statuses[r.col[MODIFIED]] ==
3151  VariableStatus::AT_LOWER_BOUND
3152  ? r.bound_backtracking_at_lower_bound
3153  : r.bound_backtracking_at_upper_bound;
3154  const ColIndex bounded_var = r.col[bound_backtracking.col_choice];
3155  const ColIndex basic_var =
3156  r.col[OtherColChoice(bound_backtracking.col_choice)];
3157  solution->variable_statuses[bounded_var] = bound_backtracking.status;
3158  solution->primal_values[bounded_var] = bound_backtracking.value;
3159  solution->variable_statuses[basic_var] = VariableStatus::BASIC;
3160  // If the modified column is VariableStatus::BASIC, then its value is
3161  // already set
3162  // correctly. If it's the deleted column that is basic, its value is
3163  // set below the switch() block.
3164  }
3165  }
3166 
3167  // Restore the value of the deleted column if it is VariableStatus::BASIC.
3168  if (solution->variable_statuses[r.col[DELETED]] == VariableStatus::BASIC) {
3169  solution->primal_values[r.col[DELETED]] =
3170  (r.rhs -
3171  solution->primal_values[r.col[MODIFIED]] * r.coeff[MODIFIED]) /
3172  r.coeff[DELETED];
3173  }
3174 
3175  // Make the deleted constraint status FIXED.
3176  solution->constraint_statuses[r.row] = ConstraintStatus::FIXED_VALUE;
3177 
3178  // Adjust the dual value of the deleted constraint so that the
3179  // VariableStatus::BASIC column has a reduced cost of zero (if the two
3180  // are VariableStatus::BASIC, just pick one).
3181  // The reduced cost of the other variable will then automatically be
3182  // correct: zero if it's VariableStatus::BASIC, and with the correct sign if
3183  // it's bounded.
3184  const ColChoice col_choice =
3185  solution->variable_statuses[r.col[DELETED]] == VariableStatus::BASIC
3186  ? DELETED
3187  : MODIFIED;
3188  // To compute the reduced cost properly (i.e. without the restored row).
3189  solution->dual_values[r.row] = 0.0;
3190  const Fractional current_reduced_cost =
3191  r.objective_coefficient[col_choice] -
3192  PreciseScalarProduct(solution->dual_values, r.column[col_choice]);
3193  solution->dual_values[r.row] = current_reduced_cost / r.coeff[col_choice];
3194  }
3195 
3196  // Fix potential bad ConstraintStatus::FIXED_VALUE statuses.
3197  FixConstraintWithFixedStatuses(saved_row_lower_bounds_,
3198  saved_row_upper_bounds_, solution);
3199 }
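// Illustrative sketch (standalone, plain doubles; not glop's API) of the value
// recovery above, assuming for simplicity that the deleted basic column X
// appears only in the restored row: X follows from a*X + b*Y = c, and the
// row's dual value is picked so that X gets a zero reduced cost.
#include <cassert>

int main() {
  // Doubleton equality a*X + b*Y = c with X deleted during presolve.
  const double a = 2.0, b = 4.0, c = 8.0;

  // Postsolve: given the recovered value of Y, X follows from the equality.
  const double y_value = 1.5;
  const double x_value = (c - b * y_value) / a;  // (8 - 6) / 2 = 1
  assert(x_value == 1.0);

  // With obj_x the objective coefficient of X and no other rows touching X,
  // the reduced cost of X is obj_x - dual * a; choosing dual = obj_x / a
  // makes it zero, which is what the division by r.coeff[col_choice] does.
  const double obj_x = 6.0;
  const double dual_value = obj_x / a;
  assert(obj_x - dual_value * a == 0.0);
  return 0;
}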
3200 
3201 void FixConstraintWithFixedStatuses(const DenseColumn& row_lower_bounds,
3202  const DenseColumn& row_upper_bounds,
3203  ProblemSolution* solution) {
3204  const RowIndex num_rows = solution->constraint_statuses.size();
3205  DCHECK_EQ(row_lower_bounds.size(), num_rows);
3206  DCHECK_EQ(row_upper_bounds.size(), num_rows);
3207  for (RowIndex row(0); row < num_rows; ++row) {
3208  if (solution->constraint_statuses[row] != ConstraintStatus::FIXED_VALUE) {
3209  continue;
3210  }
3211  if (row_lower_bounds[row] == row_upper_bounds[row]) continue;
3212 
3213  // We need to fix the status and we just need to make sure that the bound we
3214  // choose satisfies the LP optimality conditions.
3215  if (solution->dual_values[row] > 0) {
3216  solution->constraint_statuses[row] = ConstraintStatus::AT_LOWER_BOUND;
3217  } else {
3218  solution->constraint_statuses[row] = ConstraintStatus::AT_UPPER_BOUND;
3219  }
3220  }
3221 }
3222 
3223 void DoubletonEqualityRowPreprocessor::
3224  SwapDeletedAndModifiedVariableRestoreInfo(RestoreInfo* r) {
3225  using std::swap;
3226  swap(r->col[DELETED], r->col[MODIFIED]);
3227  swap(r->coeff[DELETED], r->coeff[MODIFIED]);
3228  swap(r->lb[DELETED], r->lb[MODIFIED]);
3229  swap(r->ub[DELETED], r->ub[MODIFIED]);
3230  swap(r->column[DELETED], r->column[MODIFIED]);
3231  swap(r->objective_coefficient[DELETED], r->objective_coefficient[MODIFIED]);
3232 }
3233 
3234 // --------------------------------------------------------
3235 // DualizerPreprocessor
3236 // --------------------------------------------------------
3237 
3238 bool DualizerPreprocessor::Run(LinearProgram* lp) {
3239  SCOPED_INSTRUCTION_COUNT(time_limit_);
3240  RETURN_VALUE_IF_NULL(lp, false);
3241  if (parameters_.solve_dual_problem() == GlopParameters::NEVER_DO) {
3242  return false;
3243  }
3244 
3245  // Store the original problem size and direction.
3246  primal_num_cols_ = lp->num_variables();
3247  primal_num_rows_ = lp->num_constraints();
3248  primal_is_maximization_problem_ = lp->IsMaximizationProblem();
3249 
3250  // If we need to decide whether or not to take the dual, we only take it when
3251  // the matrix has more rows than columns. The number of rows of a linear
3252  // program gives the size of the square matrices we need to invert and the
3253  // order of iterations of the simplex method. So solving a program with fewer
3254  // rows is likely a better alternative. Note that the number of rows of the
3255  // dual is the number of columns of the primal.
3256  //
3257  // Note however that the default is a conservative factor because if the
3258  // user gives us a primal program, we assume they know what they are doing and
3259  // sometimes a problem is a lot faster to solve in a given formulation
3260  // even if its dimensions would say otherwise.
3261  //
3262  // Another reason to be conservative is that the number of columns of the
3263  // dual is the number of rows of the primal plus up to two times the number of
3264  // columns of the primal.
3265  //
3266  // TODO(user): This effect can be lowered if we use some of the extra
3267  // variables as slack variables, which we are not doing at this point.
3268  if (parameters_.solve_dual_problem() == GlopParameters::LET_SOLVER_DECIDE) {
3269  if (1.0 * primal_num_rows_.value() <
3270  parameters_.dualizer_threshold() * primal_num_cols_.value()) {
3271  return false;
3272  }
3273  }
3274 
3275  // Save the linear program bounds.
3276  // Also make sure that all the bounded variables have at least one bound set to
3277  // zero. This will be needed to post-solve a dual-basic solution into a
3278  // primal-basic one.
3279  const ColIndex num_cols = lp->num_variables();
3280  variable_lower_bounds_.assign(num_cols, 0.0);
3281  variable_upper_bounds_.assign(num_cols, 0.0);
3282  for (ColIndex col(0); col < num_cols; ++col) {
3283  const Fractional lower = lp->variable_lower_bounds()[col];
3284  const Fractional upper = lp->variable_upper_bounds()[col];
3285 
3286  // We need to shift one of the bounds to zero.
3287  variable_lower_bounds_[col] = lower;
3288  variable_upper_bounds_[col] = upper;
3289  const Fractional value = MinInMagnitudeOrZeroIfInfinite(lower, upper);
3290  if (value != 0.0) {
3291  lp->SetVariableBounds(col, lower - value, upper - value);
3292  SubtractColumnMultipleFromConstraintBound(col, value, lp);
3293  }
3294  }
3295 
3296  // Fill the information that will be needed during postsolve.
3297  //
3298  // TODO(user): This will break if PopulateFromDual() is changed, so document
3299  // the convention or make the function fill these vectors?
3300  dual_status_correspondence_.clear();
3301  for (RowIndex row(0); row < primal_num_rows_; ++row) {
3302  const Fractional lower_bound = lp->constraint_lower_bounds()[row];
3303  const Fractional upper_bound = lp->constraint_upper_bounds()[row];
3304  if (lower_bound == upper_bound) {
3305  dual_status_correspondence_.push_back(VariableStatus::FIXED_VALUE);
3306  } else if (upper_bound != kInfinity) {
3307  dual_status_correspondence_.push_back(VariableStatus::AT_UPPER_BOUND);
3308  } else if (lower_bound != -kInfinity) {
3309  dual_status_correspondence_.push_back(VariableStatus::AT_LOWER_BOUND);
3310  } else {
3311  LOG(DFATAL) << "There should be no free constraint in this lp.";
3312  }
3313  }
3314  slack_or_surplus_mapping_.clear();
3315  for (ColIndex col(0); col < primal_num_cols_; ++col) {
3316  const Fractional lower_bound = lp->variable_lower_bounds()[col];
3317  const Fractional upper_bound = lp->variable_upper_bounds()[col];
3318  if (lower_bound != -kInfinity) {
3319  dual_status_correspondence_.push_back(
3320  upper_bound == lower_bound ? VariableStatus::FIXED_VALUE
3321  : VariableStatus::AT_LOWER_BOUND);
3322  slack_or_surplus_mapping_.push_back(col);
3323  }
3324  }
3325  for (ColIndex col(0); col < primal_num_cols_; ++col) {
3326  const Fractional lower_bound = lp->variable_lower_bounds()[col];
3327  const Fractional upper_bound = lp->variable_upper_bounds()[col];
3328  if (upper_bound != kInfinity) {
3329  dual_status_correspondence_.push_back(
3330  upper_bound == lower_bound ? VariableStatus::FIXED_VALUE
3331  : VariableStatus::AT_UPPER_BOUND);
3332  slack_or_surplus_mapping_.push_back(col);
3333  }
3334  }
3335 
3336  // TODO(user): There are two different ways to deal with ranged rows when
3337  // taking the dual. The default way is to duplicate such rows, see
3338  // PopulateFromDual() for details. Another way is to call
3339  // lp->AddSlackVariablesForFreeAndBoxedRows() before calling
3340  // PopulateFromDual(). Add an option to switch between the two as this may
3341  // change the running time?
3342  //
3343  // Note however that the default algorithm is likely to result in a faster
3344  // solving time because the dual program will have fewer rows.
3345  LinearProgram dual;
3346  dual.PopulateFromDual(*lp, &duplicated_rows_);
3347  dual.Swap(lp);
3348  return true;
3349 }
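// Illustrative sketch (standalone; not glop's API) of the LET_SOLVER_DECIDE
// test above: the dual is taken only when the primal has at least
// dualizer_threshold times as many rows as columns. The 1.5 below is just an
// example value, not the parameter's default.
#include <cassert>

bool ShouldDualize(int num_rows, int num_cols, double dualizer_threshold) {
  // Mirrors the early return in Run(): keep the primal when
  // num_rows < dualizer_threshold * num_cols.
  return 1.0 * num_rows >= dualizer_threshold * num_cols;
}

int main() {
  assert(ShouldDualize(/*num_rows=*/2000, /*num_cols=*/1000, 1.5));
  assert(!ShouldDualize(/*num_rows=*/1200, /*num_cols=*/1000, 1.5));
  return 0;
}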
3350 
3351 // Note(user): This assumes that LinearProgram.PopulateFromDual() uses
3352 // the first ColIndex and RowIndex for the rows and columns of the given
3353 // problem.
3354 void DualizerPreprocessor::RecoverSolution(ProblemSolution* solution) const {
3355  SCOPED_INSTRUCTION_COUNT(time_limit_);
3356  RETURN_IF_NULL(solution);
3357 
3358  DenseRow new_primal_values(primal_num_cols_, 0.0);
3359  VariableStatusRow new_variable_statuses(primal_num_cols_,
3360  VariableStatus::FREE);
3361  DCHECK_LE(primal_num_cols_, RowToColIndex(solution->dual_values.size()));
3362  for (ColIndex col(0); col < primal_num_cols_; ++col) {
3363  RowIndex row = ColToRowIndex(col);
3364  const Fractional lower = variable_lower_bounds_[col];
3365  const Fractional upper = variable_upper_bounds_[col];
3366 
3367  // The new variable value corresponds to the dual value of the dual.
3368  // The shift applied during presolve needs to be removed.
3369  const Fractional shift = MinInMagnitudeOrZeroIfInfinite(lower, upper);
3370  new_primal_values[col] = solution->dual_values[row] + shift;
3371 
3372  // A variable will be VariableStatus::BASIC if the dual constraint is not.
3373  if (solution->constraint_statuses[row] != ConstraintStatus::BASIC) {
3374  new_variable_statuses[col] = VariableStatus::BASIC;
3375  } else {
3376  // Otherwise, the dual value must be zero (if the solution is feasible),
3377  // and the variable is at an exact bound or zero if it is
3378  // VariableStatus::FREE. Note that this works because the bounds are
3379  // shifted to 0.0 in the presolve!
3380  new_variable_statuses[col] = ComputeVariableStatus(shift, lower, upper);
3381  }
3382  }
3383 
3384  // A basic variable that corresponds to a slack/surplus variable is the same as
3385  // a basic row. The new variable status (that was just set to
3386  // VariableStatus::BASIC above)
3387  // needs to be corrected and depends on the variable type (slack/surplus).
3388  const ColIndex begin = RowToColIndex(primal_num_rows_);
3389  const ColIndex end = dual_status_correspondence_.size();
3390  DCHECK_GE(solution->variable_statuses.size(), end);
3391  DCHECK_EQ(end - begin, slack_or_surplus_mapping_.size());
3392  for (ColIndex index(begin); index < end; ++index) {
3393  if (solution->variable_statuses[index] == VariableStatus::BASIC) {
3394  const ColIndex col = slack_or_surplus_mapping_[index - begin];
3395  const VariableStatus status = dual_status_correspondence_[index];
3396 
3397  // The new variable value is set to its exact bound because the dual
3398  // variable value can be imprecise.
3399  new_variable_statuses[col] = status;
3400  if (status == VariableStatus::AT_UPPER_BOUND ||
3401  status == VariableStatus::FIXED_VALUE) {
3402  new_primal_values[col] = variable_upper_bounds_[col];
3403  } else {
3405  new_primal_values[col] = variable_lower_bounds_[col];
3406  }
3407  }
3408  }
3409 
3410  // Note the <= in the DCHECK, since we may need to add variables when taking
3411  // the dual.
3412  DCHECK_LE(primal_num_rows_, ColToRowIndex(solution->primal_values.size()));
3413  DenseColumn new_dual_values(primal_num_rows_, 0.0);
3414  ConstraintStatusColumn new_constraint_statuses(primal_num_rows_,
3415  ConstraintStatus::FREE);
3416 
3417  // Note that the sign needs to be corrected because of the special behavior of
3418  // PopulateFromDual() on a maximization problem, see the comment in the
3419  // declaration of PopulateFromDual().
3420  Fractional sign = primal_is_maximization_problem_ ? -1 : 1;
3421  for (RowIndex row(0); row < primal_num_rows_; ++row) {
3422  const ColIndex col = RowToColIndex(row);
3423  new_dual_values[row] = sign * solution->primal_values[col];
3424 
3425  // A constraint will be ConstraintStatus::BASIC if the dual variable is not.
3426  if (solution->variable_statuses[col] != VariableStatus::BASIC) {
3427  new_constraint_statuses[row] = ConstraintStatus::BASIC;
3428  if (duplicated_rows_[row] != kInvalidCol) {
3429  if (solution->variable_statuses[duplicated_rows_[row]] ==
3430  VariableStatus::BASIC) {
3431  // The duplicated row is always about the lower bound.
3432  new_constraint_statuses[row] = ConstraintStatus::AT_LOWER_BOUND;
3433  }
3434  }
3435  } else {
3436  // ConstraintStatus::AT_LOWER_BOUND/ConstraintStatus::AT_UPPER_BOUND/
3437  // ConstraintStatus::FIXED depend on the type of the constraint at this
3438  // position.
3439  new_constraint_statuses[row] =
3440  VariableToConstraintStatus(dual_status_correspondence_[col]);
3441  }
3442 
3443  // If the original row was duplicated, we need to take into account the
3444  // value of the corresponding dual column.
3445  if (duplicated_rows_[row] != kInvalidCol) {
3446  new_dual_values[row] +=
3447  sign * solution->primal_values[duplicated_rows_[row]];
3448  }
3449 
3450  // Because non-basic variable values are exactly at one of their bounds, a
3451  // new basic constraint will have a dual value exactly equal to zero.
3452  DCHECK(new_dual_values[row] == 0 ||
3453  new_constraint_statuses[row] != ConstraintStatus::BASIC);
3454  }
3455 
3456  solution->status = ChangeStatusToDualStatus(solution->status);
3457  new_primal_values.swap(solution->primal_values);
3458  new_dual_values.swap(solution->dual_values);
3459  new_variable_statuses.swap(solution->variable_statuses);
3460  new_constraint_statuses.swap(solution->constraint_statuses);
3461 }
3462 
3463 ProblemStatus DualizerPreprocessor::ChangeStatusToDualStatus(
3464  ProblemStatus status) const {
3465  switch (status) {
3466  case ProblemStatus::PRIMAL_INFEASIBLE:
3467  return ProblemStatus::DUAL_INFEASIBLE;
3468  case ProblemStatus::DUAL_INFEASIBLE:
3469  return ProblemStatus::PRIMAL_INFEASIBLE;
3470  case ProblemStatus::PRIMAL_UNBOUNDED:
3471  return ProblemStatus::DUAL_UNBOUNDED;
3472  case ProblemStatus::DUAL_UNBOUNDED:
3473  return ProblemStatus::PRIMAL_UNBOUNDED;
3474  case ProblemStatus::PRIMAL_FEASIBLE:
3475  return ProblemStatus::DUAL_FEASIBLE;
3476  case ProblemStatus::DUAL_FEASIBLE:
3477  return ProblemStatus::PRIMAL_FEASIBLE;
3478  default:
3479  return status;
3480  }
3481 }
3482 
3483 // --------------------------------------------------------
3484 // ShiftVariableBoundsPreprocessor
3485 // --------------------------------------------------------
3486 
3487 bool ShiftVariableBoundsPreprocessor::Run(LinearProgram* lp) {
3488  SCOPED_INSTRUCTION_COUNT(time_limit_);
3489  RETURN_VALUE_IF_NULL(lp, false);
3490 
3491  // Save the linear program bounds before shifting them.
3492  bool all_variable_domains_contain_zero = true;
3493  const ColIndex num_cols = lp->num_variables();
3494  variable_initial_lbs_.assign(num_cols, 0.0);
3495  variable_initial_ubs_.assign(num_cols, 0.0);
3496  for (ColIndex col(0); col < num_cols; ++col) {
3497  variable_initial_lbs_[col] = lp->variable_lower_bounds()[col];
3498  variable_initial_ubs_[col] = lp->variable_upper_bounds()[col];
3499  if (0.0 < variable_initial_lbs_[col] || 0.0 > variable_initial_ubs_[col]) {
3500  all_variable_domains_contain_zero = false;
3501  }
3502  }
3503  VLOG(1) << "Maximum variable bounds magnitude (before shift): "
3504  << ComputeMaxVariableBoundsMagnitude(*lp);
3505 
3506  // Abort early if there is nothing to do.
3507  if (all_variable_domains_contain_zero) return false;
3508 
3509  // Shift the variable bounds and compute the changes to the constraint bounds
3510  // and objective offset in a precise way.
3511  int num_bound_shifts = 0;
3512  const RowIndex num_rows = lp->num_constraints();
3513  KahanSum objective_offset;
3514  absl::StrongVector<RowIndex, KahanSum> row_offsets(num_rows.value());
3515  offsets_.assign(num_cols, 0.0);
3516  for (ColIndex col(0); col < num_cols; ++col) {
3517  if (0.0 < variable_initial_lbs_[col] || 0.0 > variable_initial_ubs_[col]) {
3518  Fractional offset = MinInMagnitudeOrZeroIfInfinite(
3519  variable_initial_lbs_[col], variable_initial_ubs_[col]);
3520  if (in_mip_context_ && lp->IsVariableInteger(col)) {
3521  // In the integer case, we truncate the number because if for instance
3522  // the lower bound is a positive integer + epsilon, we only want to
3523  // shift by the integer and leave the lower bound at epsilon.
3524  //
3525  // TODO(user): This would not be needed, if we always make the bound
3526  // of an integer variable integer before applying this preprocessor.
3527  offset = trunc(offset);
3528  } else {
3529  DCHECK_NE(offset, 0.0);
3530  }
3531  offsets_[col] = offset;
3532  lp->SetVariableBounds(col, variable_initial_lbs_[col] - offset,
3533  variable_initial_ubs_[col] - offset);
3534  const SparseColumn& sparse_column = lp->GetSparseColumn(col);
3535  for (const SparseColumn::Entry e : sparse_column) {
3536  row_offsets[e.row()].Add(e.coefficient() * offset);
3537  }
3538  objective_offset.Add(lp->objective_coefficients()[col] * offset);
3539  ++num_bound_shifts;
3540  }
3541  }
3542  VLOG(1) << "Maximum variable bounds magnitude (after " << num_bound_shifts
3543  << " shifts): " << ComputeMaxVariableBoundsMagnitude(*lp);
3544 
3545  // Apply the changes to the constraint bound and objective offset.
3546  for (RowIndex row(0); row < num_rows; ++row) {
3547  lp->SetConstraintBounds(
3548  row, lp->constraint_lower_bounds()[row] - row_offsets[row].Value(),
3549  lp->constraint_upper_bounds()[row] - row_offsets[row].Value());
3550  }
3551  lp->SetObjectiveOffset(lp->objective_offset() + objective_offset.Value());
3552  return true;
3553 }
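// Worked sketch of the shift above (standalone, plain doubles; not glop's
// API): a variable with domain [3, 7], objective coefficient 5 and
// coefficient 2 in a row with upper bound 20 is shifted by 3, so its domain
// becomes [0, 4], the row bound becomes 14 and the objective offset grows by
// 15; postsolve simply adds the offset back to the primal value.
#include <cassert>

int main() {
  const double lb = 3.0, ub = 7.0, objective_coeff = 5.0;
  const double row_coeff = 2.0, row_upper_bound = 20.0;

  // Shift by the bound of smallest magnitude, here 3, so that 0 is feasible.
  const double offset = lb;
  const double new_lb = lb - offset, new_ub = ub - offset;
  const double new_row_upper_bound = row_upper_bound - row_coeff * offset;
  const double objective_offset = objective_coeff * offset;

  assert(new_lb == 0.0 && new_ub == 4.0);
  assert(new_row_upper_bound == 14.0);
  assert(objective_offset == 15.0);

  // Postsolve: a shifted solution value x' maps back to x = x' + offset.
  const double shifted_value = 1.0;
  assert(shifted_value + offset == 4.0);
  return 0;
}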
3554 
3555 void ShiftVariableBoundsPreprocessor::RecoverSolution(
3556  ProblemSolution* solution) const {
3557  SCOPED_INSTRUCTION_COUNT(time_limit_);
3558  RETURN_IF_NULL(solution);
3559  const ColIndex num_cols = solution->variable_statuses.size();
3560  for (ColIndex col(0); col < num_cols; ++col) {
3561  if (in_mip_context_) {
3562  solution->primal_values[col] += offsets_[col];
3563  } else {
3564  switch (solution->variable_statuses[col]) {
3565  case VariableStatus::FIXED_VALUE:
3566  ABSL_FALLTHROUGH_INTENDED;
3567  case VariableStatus::AT_LOWER_BOUND:
3568  solution->primal_values[col] = variable_initial_lbs_[col];
3569  break;
3570  case VariableStatus::AT_UPPER_BOUND:
3571  solution->primal_values[col] = variable_initial_ubs_[col];
3572  break;
3572  break;
3573  case VariableStatus::BASIC:
3574  solution->primal_values[col] += offsets_[col];
3575  break;
3576  case VariableStatus::FREE:
3577  break;
3578  }
3579  }
3580  }
3581 }
3582 
3583 // --------------------------------------------------------
3584 // ScalingPreprocessor
3585 // --------------------------------------------------------
3586 
3587 bool ScalingPreprocessor::Run(LinearProgram* lp) {
3588  SCOPED_INSTRUCTION_COUNT(time_limit_);
3589  RETURN_VALUE_IF_NULL(lp, false);
3590  if (!parameters_.use_scaling()) return false;
3591 
3592  // Save the linear program bounds before scaling them.
3593  const ColIndex num_cols = lp->num_variables();
3594  variable_lower_bounds_.assign(num_cols, 0.0);
3595  variable_upper_bounds_.assign(num_cols, 0.0);
3596  for (ColIndex col(0); col < num_cols; ++col) {
3597  variable_lower_bounds_[col] = lp->variable_lower_bounds()[col];
3598  variable_upper_bounds_[col] = lp->variable_upper_bounds()[col];
3599  }
3600 
3601  // See the doc of these functions for more details.
3602  // It is important to call Scale() before the other two.
3603  Scale(lp, &scaler_, parameters_.scaling_method());
3604  cost_scaling_factor_ = lp->ScaleObjective(parameters_.cost_scaling());
3605  bound_scaling_factor_ = lp->ScaleBounds();
3606 
3607  return true;
3608 }
3609 
3610 void ScalingPreprocessor::RecoverSolution(ProblemSolution* solution) const {
3611  SCOPED_INSTRUCTION_COUNT(time_limit_);
3612  RETURN_IF_NULL(solution);
3613 
3614  scaler_.ScaleRowVector(false, &(solution->primal_values));
3615  for (ColIndex col(0); col < solution->primal_values.size(); ++col) {
3616  solution->primal_values[col] *= bound_scaling_factor_;
3617  }
3618 
3619  scaler_.ScaleColumnVector(false, &(solution->dual_values));
3620  for (RowIndex row(0); row < solution->dual_values.size(); ++row) {
3621  solution->dual_values[row] *= cost_scaling_factor_;
3622  }
3623 
3624  // Make sure the variables are at their exact bounds according to their status.
3625  // This only removes a tiny error (about 1e-15), but it keeps the variables
3626  // exactly at their bounds.
3627  const ColIndex num_cols = solution->primal_values.size();
3628  for (ColIndex col(0); col < num_cols; ++col) {
3629  switch (solution->variable_statuses[col]) {
3630  case VariableStatus::FIXED_VALUE:
3631  ABSL_FALLTHROUGH_INTENDED;
3632  case VariableStatus::AT_UPPER_BOUND:
3633  solution->primal_values[col] = variable_upper_bounds_[col];
3634  break;
3635  case VariableStatus::AT_LOWER_BOUND:
3636  solution->primal_values[col] = variable_lower_bounds_[col];
3637  break;
3637  break;
3638  case VariableStatus::FREE:
3639  ABSL_FALLTHROUGH_INTENDED;
3640  case VariableStatus::BASIC:
3641  break;
3642  }
3643  }
3644 }
3645 
3646 // --------------------------------------------------------
3647 // ToMinimizationPreprocessor
3648 // --------------------------------------------------------
3649 
3650 bool ToMinimizationPreprocessor::Run(LinearProgram* lp) {
3651  SCOPED_INSTRUCTION_COUNT(time_limit_);
3652  RETURN_VALUE_IF_NULL(lp, false);
3653  if (lp->IsMaximizationProblem()) {
3654  for (ColIndex col(0); col < lp->num_variables(); ++col) {
3655  const Fractional coeff = lp->objective_coefficients()[col];
3656  if (coeff != 0.0) {
3657  lp->SetObjectiveCoefficient(col, -coeff);
3658  }
3659  }
3660  lp->SetMaximizationProblem(false);
3661  lp->SetObjectiveOffset(-lp->objective_offset());
3662  lp->SetObjectiveScalingFactor(-lp->objective_scaling_factor());
3663  }
3664  return false;
3665 }
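// Tiny standalone check (not glop's API) of the identity the transformation
// above relies on: maximizing c*x is the same as minimizing -c*x and negating
// the result.
#include <algorithm>
#include <cassert>
#include <vector>

int main() {
  // Maximize 3x over x in {1, 2, 5}  <=>  -(minimize -3x over the same set).
  const std::vector<double> values = {1.0, 2.0, 5.0};
  double best_max = -1e100, best_min = 1e100;
  for (const double x : values) {
    best_max = std::max(best_max, 3.0 * x);
    best_min = std::min(best_min, -3.0 * x);
  }
  assert(best_max == -best_min);  // 15 == -(-15)
  return 0;
}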
3666 
3667 void ToMinimizationPreprocessor::RecoverSolution(
3668  ProblemSolution* solution) const {}
3669 
3670 // --------------------------------------------------------
3671 // AddSlackVariablesPreprocessor
3672 // --------------------------------------------------------
3673 
3674 bool AddSlackVariablesPreprocessor::Run(LinearProgram* lp) {
3675  SCOPED_INSTRUCTION_COUNT(time_limit_);
3676  RETURN_VALUE_IF_NULL(lp, false);
3677  lp->AddSlackVariablesWhereNecessary(
3678  /*detect_integer_constraints=*/true);
3679  first_slack_col_ = lp->GetFirstSlackVariable();
3680  return true;
3681 }
3682 
3683 void AddSlackVariablesPreprocessor::RecoverSolution(
3684  ProblemSolution* solution) const {
3685  SCOPED_INSTRUCTION_COUNT(time_limit_);
3686  RETURN_IF_NULL(solution);
3687 
3688  // Compute constraint statuses from statuses of slack variables.
3689  const RowIndex num_rows = solution->dual_values.size();
3690  for (RowIndex row(0); row < num_rows; ++row) {
3691  const ColIndex slack_col = first_slack_col_ + RowToColIndex(row);
3692  const VariableStatus variable_status =
3693  solution->variable_statuses[slack_col];
3694  ConstraintStatus constraint_status = ConstraintStatus::FREE;
3695  // The slack variables have reversed bounds - if the value of the variable
3696  // is at one bound, the value of the constraint is at the opposite bound.
3697  switch (variable_status) {
3698  case VariableStatus::AT_LOWER_BOUND:
3699  constraint_status = ConstraintStatus::AT_UPPER_BOUND;
3700  break;
3701  case VariableStatus::AT_UPPER_BOUND:
3702  constraint_status = ConstraintStatus::AT_LOWER_BOUND;
3703  break;
3703  break;
3704  default:
3705  constraint_status = VariableToConstraintStatus(variable_status);
3706  break;
3707  }
3708  solution->constraint_statuses[row] = constraint_status;
3709  }
3710 
3711  // Drop the primal values and variable statuses for slack variables.
3712  solution->primal_values.resize(first_slack_col_, 0.0);
3713  solution->variable_statuses.resize(first_slack_col_, VariableStatus::FREE);
3714 }
3715 
3716 } // namespace glop
3717 } // namespace operations_research