| // Ceres Solver - A fast non-linear least squares minimizer |
| // Copyright 2023 Google Inc. All rights reserved. |
| // http://ceres-solver.org/ |
| // |
| // Redistribution and use in source and binary forms, with or without |
| // modification, are permitted provided that the following conditions are met: |
| // |
| // * Redistributions of source code must retain the above copyright notice, |
| // this list of conditions and the following disclaimer. |
| // * Redistributions in binary form must reproduce the above copyright notice, |
| // this list of conditions and the following disclaimer in the documentation |
| // and/or other materials provided with the distribution. |
| // * Neither the name of Google Inc. nor the names of its contributors may be |
| // used to endorse or promote products derived from this software without |
| // specific prior written permission. |
| // |
| // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" |
| // AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE |
| // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE |
| // ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE |
| // LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR |
| // CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF |
| // SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS |
| // INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN |
| // CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) |
| // ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE |
| // POSSIBILITY OF SUCH DAMAGE. |
| // |
| // Author: sameeragarwal@google.com (Sameer Agarwal) |
| |
| #include "ceres/line_search.h" |
| |
| #include <algorithm> |
| #include <cmath> |
| #include <iomanip> |
| #include <map> |
| #include <memory> |
| #include <ostream> // NOLINT |
| #include <string> |
| #include <vector> |
| |
| #include "absl/log/check.h" |
| #include "absl/log/log.h" |
| #include "absl/strings/str_cat.h" |
| #include "absl/strings/str_format.h" |
| #include "absl/time/clock.h" |
| #include "absl/time/time.h" |
| #include "ceres/evaluator.h" |
| #include "ceres/function_sample.h" |
| #include "ceres/internal/eigen.h" |
| #include "ceres/map_util.h" |
| #include "ceres/polynomial.h" |
| #include "ceres/types.h" |
| |
| namespace ceres::internal { |
| |
| namespace { |
| // Precision used for floating point values in error message output. |
| const int kErrorMessageNumericPrecision = 8; |
| } // namespace |
| |
| std::ostream& operator<<(std::ostream& os, const FunctionSample& sample); |
| |
| // Convenience stream operator for pushing FunctionSamples into log messages. |
| std::ostream& operator<<(std::ostream& os, const FunctionSample& sample) { |
| os << sample.ToDebugString(); |
| return os; |
| } |
| |
| LineSearch::~LineSearch() = default; |
| |
| LineSearch::LineSearch(const LineSearch::Options& options) |
| : options_(options) {} |
| |
| std::unique_ptr<LineSearch> LineSearch::Create( |
| const LineSearchType line_search_type, |
| const LineSearch::Options& options, |
| std::string* error) { |
| switch (line_search_type) { |
| case ceres::ARMIJO: |
| return std::make_unique<ArmijoLineSearch>(options); |
| case ceres::WOLFE: |
| return std::make_unique<WolfeLineSearch>(options); |
| default: |
| *error = absl::StrCat("Invalid line search algorithm type: ", |
| LineSearchTypeToString(line_search_type), |
| ", unable to create line search."); |
| } |
| return nullptr; |
| } |
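|
| // The snippet below is purely illustrative and not part of the library; it |
| // sketches how Create() is typically invoked (the real call site lives in the |
| // line search minimizer), with line_search_options standing in for a |
| // populated LineSearch::Options: |
| // |
| //   std::string error; |
| //   std::unique_ptr<LineSearch> line_search = |
| //       LineSearch::Create(ceres::WOLFE, line_search_options, &error); |
| //   if (line_search == nullptr) { |
| //     LOG(ERROR) << error; |
| //   } |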
| |
| LineSearchFunction::LineSearchFunction(Evaluator* evaluator) |
| : evaluator_(evaluator), |
| position_(evaluator->NumParameters()), |
| direction_(evaluator->NumEffectiveParameters()), |
| scaled_direction_(evaluator->NumEffectiveParameters()) {} |
| |
| void LineSearchFunction::Init(const Vector& position, const Vector& direction) { |
| position_ = position; |
| direction_ = direction; |
| } |
| |
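| // Evaluate() computes the univariate restriction of the problem to the |
| // current line, i.e. (with (+) denoting Evaluator::Plus): |
| // |
| //   phi(x)  = f(position (+) x * direction) |
| //   phi'(x) = direction . gradient of f at (position (+) x * direction) |
| // |
| // and records the results, together with their validity, in *output. A |
| // hypothetical driver (the names position, delta and evaluator below are |
| // illustrative only) would look like: |
| // |
| //   LineSearchFunction line_search_function(evaluator); |
| //   line_search_function.Init(position, delta); |
| //   FunctionSample sample; |
| //   line_search_function.Evaluate(/*x=*/1.0, /*evaluate_gradient=*/true, |
| //                                 &sample); |
| //   if (sample.value_is_valid) { /* use sample.value, sample.gradient */ } |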
| void LineSearchFunction::Evaluate(const double x, |
| const bool evaluate_gradient, |
| FunctionSample* output) { |
| output->x = x; |
| output->vector_x_is_valid = false; |
| output->value_is_valid = false; |
| output->gradient_is_valid = false; |
| output->vector_gradient_is_valid = false; |
| |
| scaled_direction_ = output->x * direction_; |
| output->vector_x.resize(position_.rows(), 1); |
| if (!evaluator_->Plus(position_.data(), |
| scaled_direction_.data(), |
| output->vector_x.data())) { |
| return; |
| } |
| output->vector_x_is_valid = true; |
| |
| double* gradient = nullptr; |
| if (evaluate_gradient) { |
| output->vector_gradient.resize(direction_.rows(), 1); |
| gradient = output->vector_gradient.data(); |
| } |
| const bool eval_status = evaluator_->Evaluate( |
| output->vector_x.data(), &(output->value), nullptr, gradient, nullptr); |
| |
| if (!eval_status || !std::isfinite(output->value)) { |
| return; |
| } |
| |
| output->value_is_valid = true; |
| if (!evaluate_gradient) { |
| return; |
| } |
| |
| output->gradient = direction_.dot(output->vector_gradient); |
| if (!std::isfinite(output->gradient)) { |
| return; |
| } |
| |
| output->gradient_is_valid = true; |
| output->vector_gradient_is_valid = true; |
| } |
| |
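| // The infinity norm of the descent direction is used by the searches below to |
| // convert a step size into a bound on the size of the corresponding parameter |
| // change, e.g. in the step_size * descent_direction_max_norm < min_step_size |
| // checks. |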
| double LineSearchFunction::DirectionInfinityNorm() const { |
| return direction_.lpNorm<Eigen::Infinity>(); |
| } |
| |
| void LineSearchFunction::ResetTimeStatistics() { |
| const std::map<std::string, CallStatistics> evaluator_statistics = |
| evaluator_->Statistics(); |
| |
| initial_evaluator_residual_time = |
| FindWithDefault( |
| evaluator_statistics, "Evaluator::Residual", CallStatistics()) |
| .time; |
| initial_evaluator_jacobian_time = |
| FindWithDefault( |
| evaluator_statistics, "Evaluator::Jacobian", CallStatistics()) |
| .time; |
| } |
| |
| void LineSearchFunction::TimeStatistics( |
| absl::Duration* cost_evaluation_time, |
| absl::Duration* gradient_evaluation_time) const { |
| const std::map<std::string, CallStatistics> evaluator_time_statistics = |
| evaluator_->Statistics(); |
| *cost_evaluation_time = |
| FindWithDefault( |
| evaluator_time_statistics, "Evaluator::Residual", CallStatistics()) |
| .time - |
| initial_evaluator_residual_time; |
| // Strictly speaking this will slightly underestimate the time spent |
| // evaluating the gradient of the line search univariate cost function as it |
| // does not count the time spent performing the dot product with the direction |
| // vector. However, this will typically be small by comparison, and also |
| // allows direct subtraction of the timing information from the totals for |
| // the evaluator returned in the solver summary. |
| *gradient_evaluation_time = |
| FindWithDefault( |
| evaluator_time_statistics, "Evaluator::Jacobian", CallStatistics()) |
| .time - |
| initial_evaluator_jacobian_time; |
| } |
| |
| void LineSearch::Search(double step_size_estimate, |
| double initial_cost, |
| double initial_gradient, |
| Summary* summary) const { |
| const absl::Time start_time = absl::Now(); |
| CHECK(summary != nullptr); |
| *summary = LineSearch::Summary(); |
| |
| summary->cost_evaluation_time = absl::ZeroDuration(); |
| summary->gradient_evaluation_time = absl::ZeroDuration(); |
| summary->polynomial_minimization_time = absl::ZeroDuration(); |
| options().function->ResetTimeStatistics(); |
| this->DoSearch(step_size_estimate, initial_cost, initial_gradient, summary); |
| options().function->TimeStatistics(&summary->cost_evaluation_time, |
| &summary->gradient_evaluation_time); |
| |
| summary->total_time = absl::Now() - start_time; |
| } |
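|
| // Illustrative (hypothetical) use of Search(); the actual driver is the line |
| // search minimizer, and the names cost, gradient and delta below are |
| // assumptions made purely for this sketch: |
| // |
| //   LineSearch::Summary line_search_summary; |
| //   line_search->Search(/*step_size_estimate=*/1.0, |
| //                       /*initial_cost=*/cost, |
| //                       /*initial_gradient=*/gradient.dot(delta), |
| //                       &line_search_summary); |
| //   if (line_search_summary.success) { |
| //     const double step_size = line_search_summary.optimal_point.x; |
| //   } else { |
| //     LOG(WARNING) << line_search_summary.error; |
| //   } |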
| |
| // Returns a step_size \in [min_step_size, max_step_size] which minimizes the |
| // interpolating polynomial, of the degree implied by interpolation_type, |
| // fitted to all of the provided samples with valid values. |
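| // |
| // For reference (the actual minimization is delegated to |
| // MinimizeInterpolatingPolynomial()), with QUADRATIC interpolation and just |
| // two samples (x0, f0, g0) and (x1, f1), the interpolant is |
| // |
| //   q(x0 + t) = f0 + g0 * t + c * t^2,  with  c = (f1 - f0 - g0 * h) / h^2 |
| // |
| // where h = x1 - x0. Its unconstrained minimizer is |
| // |
| //   x* = x0 - g0 * h^2 / (2 * (f1 - f0 - g0 * h)), |
| // |
| // and the minimum of q over [min_step_size, max_step_size] is what is |
| // returned. |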
| double LineSearch::InterpolatingPolynomialMinimizingStepSize( |
| const LineSearchInterpolationType& interpolation_type, |
| const FunctionSample& lowerbound, |
| const FunctionSample& previous, |
| const FunctionSample& current, |
| const double min_step_size, |
| const double max_step_size) const { |
| if (!current.value_is_valid || |
| (interpolation_type == BISECTION && max_step_size <= current.x)) { |
| // Either: sample is invalid; or we are using BISECTION and contracting |
| // the step size. |
| return std::min(std::max(current.x * 0.5, min_step_size), max_step_size); |
| } else if (interpolation_type == BISECTION) { |
| CHECK_GT(max_step_size, current.x); |
| // We are expanding the search (during a Wolfe bracketing phase) using |
| // BISECTION interpolation. Using BISECTION when trying to expand is |
| // strictly speaking an oxymoron, but we define this to mean always taking |
| // the maximum step size so that the Armijo & Wolfe implementations are |
| // agnostic to the interpolation type. |
| return max_step_size; |
| } |
| // Only check that the lower-bound sample is valid here, at the point where |
| // it is actually required; checking it earlier would mean replicating the |
| // current.value_is_valid == false handling in WolfeLineSearch. |
| CHECK(lowerbound.value_is_valid) |
| << std::scientific << std::setprecision(kErrorMessageNumericPrecision) |
| << "Ceres bug: lower-bound sample for interpolation is invalid, " |
| << "please contact the developers!, interpolation_type: " |
| << LineSearchInterpolationTypeToString(interpolation_type) |
| << ", lowerbound: " << lowerbound << ", previous: " << previous |
| << ", current: " << current; |
| |
| // Select step size by interpolating the function and gradient values |
| // and minimizing the corresponding polynomial. |
| std::vector<FunctionSample> samples; |
| samples.push_back(lowerbound); |
| |
| if (interpolation_type == QUADRATIC) { |
| // Two point interpolation using function values and the |
| // gradient at the lower bound. |
| samples.emplace_back(current.x, current.value); |
| |
| if (previous.value_is_valid) { |
| // Three point interpolation, using function values and the |
| // gradient at the lower bound. |
| samples.emplace_back(previous.x, previous.value); |
| } |
| } else if (interpolation_type == CUBIC) { |
| // Two point interpolation using the function values and the gradients. |
| samples.push_back(current); |
| |
| if (previous.value_is_valid) { |
| // Three point interpolation using the function values and |
| // the gradients. |
| samples.push_back(previous); |
| } |
| } else { |
| LOG(FATAL) << "Ceres bug: No handler for interpolation_type: " |
| << LineSearchInterpolationTypeToString(interpolation_type) |
| << ", please contact the developers!"; |
| } |
| |
| double step_size = 0.0, unused_min_value = 0.0; |
| MinimizeInterpolatingPolynomial( |
| samples, min_step_size, max_step_size, &step_size, &unused_min_value); |
| return step_size; |
| } |
| |
| ArmijoLineSearch::ArmijoLineSearch(const LineSearch::Options& options) |
| : LineSearch(options) {} |
| |
| void ArmijoLineSearch::DoSearch(const double step_size_estimate, |
| const double initial_cost, |
| const double initial_gradient, |
| Summary* summary) const { |
| CHECK_GE(step_size_estimate, 0.0); |
| CHECK_GT(options().sufficient_decrease, 0.0); |
| CHECK_LT(options().sufficient_decrease, 1.0); |
| CHECK_GT(options().max_num_iterations, 0); |
| LineSearchFunction* function = options().function; |
| |
| // Note initial_cost & initial_gradient are evaluated at step_size = 0, |
| // not step_size_estimate, which is our starting guess. |
| FunctionSample initial_position(0.0, initial_cost, initial_gradient); |
| initial_position.vector_x = function->position(); |
| initial_position.vector_x_is_valid = true; |
| |
| const double descent_direction_max_norm = function->DirectionInfinityNorm(); |
| FunctionSample previous; |
| FunctionSample current; |
| |
| // The Armijo line search always includes the initial point, for which both |
| // the function value and derivative are known, when fitting a minimizing |
| // polynomial. We can therefore fit up to a quadratic without requiring the |
| // gradient at the current query point. |
| const bool kEvaluateGradient = options().interpolation_type == CUBIC; |
| |
| ++summary->num_function_evaluations; |
| if (kEvaluateGradient) { |
| ++summary->num_gradient_evaluations; |
| } |
| |
| function->Evaluate(step_size_estimate, kEvaluateGradient, ¤t); |
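| // The loop below iterates until the Armijo sufficient decrease condition |
| // |
| //   f(step_size) <= f(0) + sufficient_decrease * f'(0) * step_size, |
| // |
| // with f the line search univariate function, is satisfied (or the search |
| // fails). |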
| while (!current.value_is_valid || |
| current.value > (initial_cost + options().sufficient_decrease * |
| initial_gradient * current.x)) { |
| // If current.value_is_valid is false, we treat the sample as if its cost |
| // were too large to satisfy the sufficient decrease condition, and continue |
| // searching with a contracted step size. |
| ++summary->num_iterations; |
| if (summary->num_iterations >= options().max_num_iterations) { |
| summary->error = absl::StrFormat( |
| "Line search failed: Armijo failed to find a point " |
| "satisfying the sufficient decrease condition within " |
| "specified max_num_iterations: %d.", |
| options().max_num_iterations); |
| if (!options().is_silent) { |
| LOG(WARNING) << summary->error; |
| } |
| return; |
| } |
| |
| const absl::Time polynomial_minimization_start_time = absl::Now(); |
| const double step_size = this->InterpolatingPolynomialMinimizingStepSize( |
| options().interpolation_type, |
| initial_position, |
| previous, |
| current, |
| (options().max_step_contraction * current.x), |
| (options().min_step_contraction * current.x)); |
| summary->polynomial_minimization_time += |
| (absl::Now() - polynomial_minimization_start_time); |
| |
| if (step_size * descent_direction_max_norm < options().min_step_size) { |
| summary->error = absl::StrFormat( |
| "Line search failed: step_size too small: %.5e " |
| "with descent_direction_max_norm: %.5e.", |
| step_size, |
| descent_direction_max_norm); |
| if (!options().is_silent) { |
| LOG(WARNING) << summary->error; |
| } |
| return; |
| } |
| |
| previous = current; |
| |
| ++summary->num_function_evaluations; |
| if (kEvaluateGradient) { |
| ++summary->num_gradient_evaluations; |
| } |
| |
| function->Evaluate(step_size, kEvaluateGradient, ¤t); |
| } |
| |
| summary->optimal_point = current; |
| summary->success = true; |
| } |
| |
| WolfeLineSearch::WolfeLineSearch(const LineSearch::Options& options) |
| : LineSearch(options) {} |
| |
| void WolfeLineSearch::DoSearch(const double step_size_estimate, |
| const double initial_cost, |
| const double initial_gradient, |
| Summary* summary) const { |
| // All parameters should have been validated by the Solver, but as invalid |
| // values would produce nonsensical results, hard-check them here. |
| CHECK_GE(step_size_estimate, 0.0); |
| CHECK_GT(options().sufficient_decrease, 0.0); |
| CHECK_GT(options().sufficient_curvature_decrease, |
| options().sufficient_decrease); |
| CHECK_LT(options().sufficient_curvature_decrease, 1.0); |
| CHECK_GT(options().max_step_expansion, 1.0); |
| |
| // Note initial_cost & initial_gradient are evaluated at step_size = 0, |
| // not step_size_estimate, which is our starting guess. |
| FunctionSample initial_position(0.0, initial_cost, initial_gradient); |
| initial_position.vector_x = options().function->position(); |
| initial_position.vector_x_is_valid = true; |
| bool do_zoom_search = false; |
| // Important: The high/low in bracket_high & bracket_low refer to their |
| // _function_ values, not their step sizes, i.e. it is _not_ required that |
| // bracket_low.x < bracket_high.x. |
| FunctionSample solution, bracket_low, bracket_high; |
| |
| // Wolfe bracketing phase: Increases step_size until either it finds a point |
| // that satisfies the (strong) Wolfe conditions, or an interval that brackets |
| // step sizes which satisfy the conditions. From Nocedal & Wright [1] p61 the |
| // interval: (step_size_{k-1}, step_size_{k}) contains step lengths satisfying |
| // the strong Wolfe conditions if one of the following conditions are met: |
| // |
| // 1. step_size_{k} violates the sufficient decrease (Armijo) condition. |
| // 2. f(step_size_{k}) >= f(step_size_{k-1}). |
| // 3. f'(step_size_{k}) >= 0. |
| // |
| // Caveat: If f(step_size_{k}) is invalid, then step_size is reduced; ignoring |
| // this special case, step_size increases monotonically during bracketing. |
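| // |
| // For reference, the (strong) Wolfe conditions sought here are, with phi the |
| // line search univariate function, c1 = sufficient_decrease and |
| // c2 = sufficient_curvature_decrease: |
| // |
| //   1. phi(step_size) <= phi(0) + c1 * step_size * phi'(0)   (Armijo) |
| //   2. |phi'(step_size)| <= c2 * |phi'(0)|                   (strong curvature) |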
| if (!this->BracketingPhase(initial_position, |
| step_size_estimate, |
| &bracket_low, |
| &bracket_high, |
| &do_zoom_search, |
| summary)) { |
| // Failed to find either a valid point, a valid bracket satisfying the Wolfe |
| // conditions, or even a step size > minimum tolerance satisfying the Armijo |
| // condition. |
| return; |
| } |
| |
| if (!do_zoom_search) { |
| // Either: the bracketing phase already found a point satisfying the strong |
| // Wolfe conditions, thus no Zoom is required. |
| // |
| // Or: Bracketing failed to find a valid bracket or a point satisfying the |
| // strong Wolfe conditions within max_num_iterations, or whilst searching |
| // shrank the bracket width until it was below our minimum tolerance. |
| // As these are 'artificial' constraints, and we would otherwise fail to |
| // produce a valid point when ArmijoLineSearch would succeed, we return the |
| // point with the lowest cost found thus far which satisfies the Armijo |
| // condition (but not the Wolfe conditions). |
| summary->optimal_point = bracket_low; |
| summary->success = true; |
| return; |
| } |
| |
| VLOG(3) << std::scientific << std::setprecision(kErrorMessageNumericPrecision) |
| << "Starting line search zoom phase with bracket_low: " << bracket_low |
| << ", bracket_high: " << bracket_high |
| << ", bracket width: " << fabs(bracket_low.x - bracket_high.x) |
| << ", bracket abs delta cost: " |
| << fabs(bracket_low.value - bracket_high.value); |
| |
| // Wolfe Zoom phase: Called when the Bracketing phase finds an interval of |
| // non-zero, finite width that should bracket step sizes which satisfy the |
| // (strong) Wolfe conditions (before finding a step size that satisfies the |
| // conditions). Zoom successively decreases the size of the interval until a |
| // step size which satisfies the Wolfe conditions is found. The interval is |
| // defined by bracket_low & bracket_high, which satisfy: |
| // |
| // 1. The interval bounded by step sizes: bracket_low.x & bracket_high.x |
| // contains step sizes that satisfy the strong Wolfe conditions. |
| // 2. bracket_low.x is of all the step sizes evaluated *which satisfied the |
| // Armijo sufficient decrease condition*, the one which generated the |
| // smallest function value, i.e. bracket_low.value < |
| // f(all other steps satisfying Armijo). |
| // - Note that this does _not_ (necessarily) mean that initially |
| // bracket_low.value < bracket_high.value (although this is typical), |
| // e.g. when bracket_low = initial_position and bracket_high is the |
| // first sample, which does not satisfy the Armijo condition but |
| // still has bracket_high.value < initial_position.value. |
| // 3. bracket_high is chosen after bracket_low, s.t. |
| // bracket_low.gradient * (bracket_high.x - bracket_low.x) < 0. |
| if (!this->ZoomPhase( |
| initial_position, bracket_low, bracket_high, &solution, summary) && |
| !solution.value_is_valid) { |
| // Failed to find a valid point (given the specified decrease parameters) |
| // within the specified bracket. |
| return; |
| } |
| // Ensure that if we ran out of iterations whilst zooming the bracket, or |
| // shrank the bracket width to < tolerance and failed to find a point |
| // satisfying the strong Wolfe curvature condition, we still return the point |
| // amongst those found thus far which minimizes f() and satisfies the Armijo |
| // condition. |
| |
| if (!solution.value_is_valid || solution.value > bracket_low.value) { |
| summary->optimal_point = bracket_low; |
| } else { |
| summary->optimal_point = solution; |
| } |
| |
| summary->success = true; |
| } |
| |
| // Returns true if either: |
| // |
| // A termination condition satisfying the (strong) Wolfe bracketing conditions |
| // is found: |
| // |
| // - A valid point, defined as a bracket of zero width [zoom not required]. |
| // - A valid bracket (of width > tolerance) [zoom required]. |
| // |
| // Or, searching was stopped due to an 'artificial' constraint, i.e. not |
| // a condition imposed / required by the underlying algorithm, but instead an |
| // engineering / implementation consideration; however, a step which exceeds |
| // the minimum step size and satisfies the Armijo condition was still found, |
| // and should thus be used [zoom not required]. |
| // |
| // Returns false if no step size > minimum step size was found which |
| // satisfies at least the Armijo condition. |
| bool WolfeLineSearch::BracketingPhase(const FunctionSample& initial_position, |
| const double step_size_estimate, |
| FunctionSample* bracket_low, |
| FunctionSample* bracket_high, |
| bool* do_zoom_search, |
| Summary* summary) const { |
| LineSearchFunction* function = options().function; |
| |
| FunctionSample previous = initial_position; |
| FunctionSample current; |
| |
| const double descent_direction_max_norm = function->DirectionInfinityNorm(); |
| |
| *do_zoom_search = false; |
| *bracket_low = initial_position; |
| |
| // As we require the gradient to evaluate the Wolfe condition, we always |
| // calculate it together with the value, irrespective of the interpolation |
| // type. We do this rather than calculating the gradient only after the Armijo |
| // condition is satisfied, as the computational saving from that approach |
| // would be slight (perhaps even negative due to the extra call). Also, |
| // always calculating the value & gradient together protects against us |
| // reporting invalid solutions if the cost function returns slightly different |
| // function values when evaluated with / without gradients (due to numerical |
| // issues). |
| ++summary->num_function_evaluations; |
| ++summary->num_gradient_evaluations; |
| const bool kEvaluateGradient = true; |
| function->Evaluate(step_size_estimate, kEvaluateGradient, ¤t); |
| while (true) { |
| ++summary->num_iterations; |
| |
| if (current.value_is_valid && |
| (current.value > (initial_position.value + |
| options().sufficient_decrease * |
| initial_position.gradient * current.x) || |
| (previous.value_is_valid && current.value > previous.value))) { |
| // Bracket found: current step size violates Armijo sufficient decrease |
| // condition, or has stepped past an inflection point of f() relative to |
| // previous step size. |
| *do_zoom_search = true; |
| *bracket_low = previous; |
| *bracket_high = current; |
| VLOG(3) << std::scientific |
| << std::setprecision(kErrorMessageNumericPrecision) |
| << "Bracket found: current step (" << current.x |
| << ") violates Armijo sufficient condition, or has passed an " |
| << "inflection point of f() based on value."; |
| break; |
| } |
| |
| if (current.value_is_valid && |
| fabs(current.gradient) <= -options().sufficient_curvature_decrease * |
| initial_position.gradient) { |
| // Current step size satisfies the strong Wolfe conditions, and is thus a |
| // valid termination point; therefore a Zoom is not required. |
| *bracket_low = current; |
| *bracket_high = current; |
| VLOG(3) << std::scientific |
| << std::setprecision(kErrorMessageNumericPrecision) |
| << "Bracketing phase found step size: " << current.x |
| << ", satisfying strong Wolfe conditions, initial_position: " |
| << initial_position << ", current: " << current; |
| break; |
| |
| } else if (current.value_is_valid && current.gradient >= 0) { |
| // Bracket found: current step size has stepped past an inflection point |
| // of f(), but Armijo sufficient decrease is still satisfied and |
| // f(current) is our best minimum thus far. Remember step size |
| // monotonically increases, thus previous_step_size < current_step_size |
| // even though f(previous) > f(current). |
| *do_zoom_search = true; |
| // Note inverse ordering from first bracket case. |
| *bracket_low = current; |
| *bracket_high = previous; |
| VLOG(3) << "Bracket found: current step (" << current.x |
| << ") satisfies Armijo, but has gradient >= 0, thus have passed " |
| << "an inflection point of f()."; |
| break; |
| |
| } else if (current.value_is_valid && |
| fabs(current.x - previous.x) * descent_direction_max_norm < |
| options().min_step_size) { |
| // We have shrunk the search bracket to a width less than our tolerance, |
| // and still not found either a point satisfying the strong Wolfe |
| // conditions, or a valid bracket containing such a point. Stop searching |
| // and set bracket_low to the step size amongst all those tested which |
| // minimizes f() and satisfies the Armijo condition. |
| |
| if (!options().is_silent) { |
| LOG(WARNING) << "Line search failed: Wolfe bracketing phase shrank " |
| << "bracket width: " << fabs(current.x - previous.x) |
| << ", to < tolerance: " << options().min_step_size |
| << ", with descent_direction_max_norm: " |
| << descent_direction_max_norm << ", and failed to find " |
| << "a point satisfying the strong Wolfe conditions or a " |
| << "bracketing containing such a point. Accepting " |
| << "point found satisfying Armijo condition only, to " |
| << "allow continuation."; |
| } |
| *bracket_low = current; |
| break; |
| |
| } else if (summary->num_iterations >= options().max_num_iterations) { |
| // Check num iterations bound here so that we always evaluate the |
| // max_num_iterations-th iteration against all conditions, and |
| // then perform no additional (unused) evaluations. |
| summary->error = absl::StrFormat( |
| "Line search failed: Wolfe bracketing phase failed to " |
| "find a point satisfying strong Wolfe conditions, or a " |
| "bracket containing such a point within specified " |
| "max_num_iterations: %d", |
| options().max_num_iterations); |
| if (!options().is_silent) { |
| LOG(WARNING) << summary->error; |
| } |
| // Ensure that bracket_low is always set to the step size amongst all |
| // those tested which minimizes f() and satisfies the Armijo condition |
| // when we terminate due to the 'artificial' max_num_iterations condition. |
| *bracket_low = |
| current.value_is_valid && current.value < bracket_low->value |
| ? current |
| : *bracket_low; |
| break; |
| } |
| // Either: f(current) is invalid; or, f(current) is valid, but does not |
| // satisfy the strong Wolfe conditions itself, or the conditions for |
| // being a boundary of a bracket. |
| |
| // If f(current) is valid, (but meets no criteria) expand the search by |
| // increasing the step size. If f(current) is invalid, contract the step |
| // size. |
| // |
| // In Nocedal & Wright [1] (p60), the step-size can only increase in the |
| // bracketing phase: step_size_{k+1} \in [step_size_k, step_size_k * |
| // factor]. However this does not account for the function returning invalid |
| // values which we support, in which case we need to contract the step size |
| // whilst ensuring that we do not invert the bracket, i.e., we require that: |
| // step_size_{k-1} <= step_size_{k+1} < step_size_k. |
| const double min_step_size = |
| current.value_is_valid ? current.x : previous.x; |
| const double max_step_size = |
| current.value_is_valid ? (current.x * options().max_step_expansion) |
| : current.x; |
| |
| // We are performing 2-point interpolation only here, but the API of |
| // InterpolatingPolynomialMinimizingStepSize() allows for up to |
| // 3-point interpolation, so pad call with a sample with an invalid |
| // value that will therefore be ignored. |
| const FunctionSample unused_previous; |
| DCHECK(!unused_previous.value_is_valid); |
| // Contracts step size if f(current) is not valid. |
| const absl::Time polynomial_minimization_start_time = absl::Now(); |
| const double step_size = this->InterpolatingPolynomialMinimizingStepSize( |
| options().interpolation_type, |
| previous, |
| unused_previous, |
| current, |
| min_step_size, |
| max_step_size); |
| summary->polynomial_minimization_time += |
| (absl::Now() - polynomial_minimization_start_time); |
| if (step_size * descent_direction_max_norm < options().min_step_size) { |
| summary->error = absl::StrFormat( |
| "Line search failed: step_size too small: %.5e " |
| "with descent_direction_max_norm: %.5e", |
| step_size, |
| descent_direction_max_norm); |
| if (!options().is_silent) { |
| LOG(WARNING) << summary->error; |
| } |
| return false; |
| } |
| |
| // Only advance the lower boundary (in x) of the bracket if f(current) |
| // is valid such that we can support contracting the step size when |
| // f(current) is invalid without risking inverting the bracket in x, i.e. |
| // prevent previous.x > current.x. |
| previous = current.value_is_valid ? current : previous; |
| ++summary->num_function_evaluations; |
| ++summary->num_gradient_evaluations; |
| function->Evaluate(step_size, kEvaluateGradient, ¤t); |
| } |
| |
| // Ensure that even if a valid bracket was found, we will only mark a zoom |
| // as required if the bracket's width is greater than our minimum tolerance. |
| if (*do_zoom_search && |
| fabs(bracket_high->x - bracket_low->x) * descent_direction_max_norm < |
| options().min_step_size) { |
| *do_zoom_search = false; |
| } |
| |
| return true; |
| } |
| |
| // Returns true iff solution satisfies the strong Wolfe conditions. Otherwise, |
| // i.e. when false is returned, if we stopped searching due to the 'artificial' |
| // condition of reaching max_num_iterations, solution is the step size amongst |
| // all those tested which satisfied the Armijo decrease condition and |
| // minimized f(). |
| bool WolfeLineSearch::ZoomPhase(const FunctionSample& initial_position, |
| FunctionSample bracket_low, |
| FunctionSample bracket_high, |
| FunctionSample* solution, |
| Summary* summary) const { |
| LineSearchFunction* function = options().function; |
| |
| CHECK(bracket_low.value_is_valid && bracket_low.gradient_is_valid) |
| << std::scientific << std::setprecision(kErrorMessageNumericPrecision) |
| << "Ceres bug: f_low input to Wolfe Zoom invalid, please contact " |
| << "the developers!, initial_position: " << initial_position |
| << ", bracket_low: " << bracket_low << ", bracket_high: " << bracket_high; |
| // We do not require bracket_high.gradient_is_valid as the gradient condition |
| // for a valid bracket is only dependent upon bracket_low.gradient, and |
| // in order to minimize jacobian evaluations, bracket_high.gradient may |
| // not have been calculated (if bracket_high.value does not satisfy the |
| // Armijo sufficient decrease condition and interpolation method does not |
| // require it). |
| // |
| // We also do not require that: bracket_low.value < bracket_high.value, |
| // although this is typical. This is to deal with the case when |
| // bracket_low = initial_position, bracket_high is the first sample, |
| // and bracket_high does not satisfy the Armijo condition, but still has |
| // bracket_high.value < initial_position.value. |
| CHECK(bracket_high.value_is_valid) |
| << std::scientific << std::setprecision(kErrorMessageNumericPrecision) |
| << "Ceres bug: f_high input to Wolfe Zoom invalid, please " |
| << "contact the developers!, initial_position: " << initial_position |
| << ", bracket_low: " << bracket_low << ", bracket_high: " << bracket_high; |
| |
| if (bracket_low.gradient * (bracket_high.x - bracket_low.x) >= 0) { |
| // The third condition for a valid initial bracket: |
| // |
| // 3. bracket_high is chosen after bracket_low, s.t. |
| // bracket_low.gradient * (bracket_high.x - bracket_low.x) < 0. |
| // |
| // is not satisfied. As this can happen when the user's cost function |
| // returns inconsistent gradient values relative to the function values, |
| // we do not CHECK_LT(), but we do stop processing and return an invalid |
| // value. |
| summary->error = absl::StrFormat( |
| "Line search failed: Wolfe zoom phase passed a bracket " |
| "which does not satisfy: bracket_low.gradient * " |
| "(bracket_high.x - bracket_low.x) < 0 [%.8e !< 0] " |
| "with initial_position: %s, bracket_low: %s, bracket_high:" |
| " %s, the most likely cause of which is the cost function " |
| "returning inconsistent gradient & function values.", |
| bracket_low.gradient * (bracket_high.x - bracket_low.x), |
| initial_position.ToDebugString(), |
| bracket_low.ToDebugString(), |
| bracket_high.ToDebugString()); |
| if (!options().is_silent) { |
| LOG(WARNING) << summary->error; |
| } |
| solution->value_is_valid = false; |
| return false; |
| } |
| |
| const int num_bracketing_iterations = summary->num_iterations; |
| const double descent_direction_max_norm = function->DirectionInfinityNorm(); |
| |
| while (true) { |
| // Set solution to bracket_low, as it is our best step size (smallest f()) |
| // found thus far and satisfies the Armijo condition, even though it does |
| // not satisfy the Wolfe condition. |
| *solution = bracket_low; |
| if (summary->num_iterations >= options().max_num_iterations) { |
| summary->error = absl::StrFormat( |
| "Line search failed: Wolfe zoom phase failed to " |
| "find a point satisfying strong Wolfe conditions " |
| "within specified max_num_iterations: %d, " |
| "(num iterations taken for bracketing: %d).", |
| options().max_num_iterations, |
| num_bracketing_iterations); |
| if (!options().is_silent) { |
| LOG(WARNING) << summary->error; |
| } |
| return false; |
| } |
| if (fabs(bracket_high.x - bracket_low.x) * descent_direction_max_norm < |
| options().min_step_size) { |
| // Bracket width has been reduced below tolerance, and no point satisfying |
| // the strong Wolfe conditions has been found. |
| summary->error = absl::StrFormat( |
| "Line search failed: Wolfe zoom bracket width: %.5e " |
| "too small with descent_direction_max_norm: %.5e.", |
| fabs(bracket_high.x - bracket_low.x), |
| descent_direction_max_norm); |
| if (!options().is_silent) { |
| LOG(WARNING) << summary->error; |
| } |
| return false; |
| } |
| |
| ++summary->num_iterations; |
| // Polynomial interpolation requires inputs ordered according to step size, |
| // not f(step size). |
| const FunctionSample& lower_bound_step = |
| bracket_low.x < bracket_high.x ? bracket_low : bracket_high; |
| const FunctionSample& upper_bound_step = |
| bracket_low.x < bracket_high.x ? bracket_high : bracket_low; |
| // We are performing 2-point interpolation only here, but the API of |
| // InterpolatingPolynomialMinimizingStepSize() allows for up to |
| // 3-point interpolation, so pad call with a sample with an invalid |
| // value that will therefore be ignored. |
| const FunctionSample unused_previous; |
| DCHECK(!unused_previous.value_is_valid); |
| const absl::Time polynomial_minimization_start_time = absl::Now(); |
| const double step_size = this->InterpolatingPolynomialMinimizingStepSize( |
| options().interpolation_type, |
| lower_bound_step, |
| unused_previous, |
| upper_bound_step, |
| lower_bound_step.x, |
| upper_bound_step.x); |
| summary->polynomial_minimization_time += |
| (absl::Now() - polynomial_minimization_start_time); |
| // No check on magnitude of step size being too small here as it is |
| // lower-bounded by the initial bracket start point, which was valid. |
| // |
| // As we require the gradient to evaluate the Wolfe condition, we always |
| // calculate it together with the value, irrespective of the interpolation |
| // type. We do this rather than calculating the gradient only after the Armijo |
| // condition is satisfied, as the computational saving from that approach |
| // would be slight (perhaps even negative due to the extra call). Also, |
| // always calculating the value & gradient together protects against us |
| // reporting invalid solutions if the cost function returns slightly |
| // different function values when evaluated with / without gradients (due |
| // to numerical issues). |
| ++summary->num_function_evaluations; |
| ++summary->num_gradient_evaluations; |
| const bool kEvaluateGradient = true; |
| function->Evaluate(step_size, kEvaluateGradient, solution); |
| if (!solution->value_is_valid || !solution->gradient_is_valid) { |
| summary->error = absl::StrFormat( |
| "Line search failed: Wolfe Zoom phase found " |
| "step_size: %.5e, for which function is invalid, " |
| "between low_step: %.5e and high_step: %.5e " |
| "at which function is valid.", |
| solution->x, |
| bracket_low.x, |
| bracket_high.x); |
| if (!options().is_silent) { |
| LOG(WARNING) << summary->error; |
| } |
| return false; |
| } |
| |
| VLOG(3) << "Zoom iteration: " |
| << summary->num_iterations - num_bracketing_iterations |
| << ", bracket_low: " << bracket_low |
| << ", bracket_high: " << bracket_high |
| << ", minimizing solution: " << *solution; |
| |
| if ((solution->value > (initial_position.value + |
| options().sufficient_decrease * |
| initial_position.gradient * solution->x)) || |
| (solution->value >= bracket_low.value)) { |
| // Armijo sufficient decrease not satisfied, or not better |
| // than current lowest sample, use as new upper bound. |
| bracket_high = *solution; |
| continue; |
| } |
| |
| // Armijo sufficient decrease satisfied, check strong Wolfe condition. |
| if (fabs(solution->gradient) <= |
| -options().sufficient_curvature_decrease * initial_position.gradient) { |
| // Found a valid termination point satisfying strong Wolfe conditions. |
| VLOG(3) << std::scientific |
| << std::setprecision(kErrorMessageNumericPrecision) |
| << "Zoom phase found step size: " << solution->x |
| << ", satisfying strong Wolfe conditions."; |
| break; |
| |
| } else if (solution->gradient * (bracket_high.x - bracket_low.x) >= 0) { |
| bracket_high = bracket_low; |
| } |
| |
| bracket_low = *solution; |
| } |
| // Solution contains a valid point which satisfies the strong Wolfe |
| // conditions. |
| return true; |
| } |
| |
| } // namespace ceres::internal |