Dogleg strategy and timing cleanups.
1. A new dogleg trust region strategy (a usage sketch follows this list).
2. Consistent naming of all variables that measure and report
time. All of them are doubles now.
3. Enum-to-string conversion routines.
4. bundle_adjuster.cc accepts max solver time and trust_region_strategy.
5. Time accounting is pushed into solver_impl.cc, and postprocessing
time is now accounted for explicitly.
6. IterationSummary (in iteration_callback.h) now reports cumulative time.
7. LoggingCallback logs per-iteration and cumulative time.
8. TrustRegionStrategy now allows invalid steps to be indicated
explicitly.
9. The trust region minimizer now actually terminates on max_solver_time.
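For reference, a minimal sketch of how the new pieces are selected through
the public API. The field and enum names come from the patch below; the
surrounding problem setup (the ceres::Problem named problem) is assumed to
already exist and is illustrative only:

  ceres::Solver::Options options;
  options.trust_region_strategy_type = ceres::DOGLEG;
  options.max_solver_time_in_seconds = 30.0;     // wall-clock budget
  options.linear_solver_type = ceres::DENSE_QR;  // dogleg needs an exact
                                                 // factorization solver
  ceres::Solver::Summary summary;
  ceres::Solve(options, &problem, &summary);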
Change-Id: I7e3b82c8beebc17b6b355ea46ddd280754a2d8b2
diff --git a/examples/bundle_adjuster.cc b/examples/bundle_adjuster.cc
index 1216a82..5e1b3a6 100644
--- a/examples/bundle_adjuster.cc
+++ b/examples/bundle_adjuster.cc
@@ -84,6 +84,8 @@
"parameterization.");
DEFINE_bool(robustify, false, "Use a robust loss function");
DEFINE_bool(use_block_amd, true, "Use a block oriented fill reducing ordering.");
+DEFINE_string(trust_region_strategy, "lm", "Options are: lm, dogleg");
+DEFINE_double(max_solver_time, 1e32, "Maximum solve time in seconds.");
namespace ceres {
namespace examples {
@@ -218,6 +220,15 @@
options->minimizer_progress_to_stdout = true;
options->num_threads = FLAGS_num_threads;
options->eta = FLAGS_eta;
+ options->max_solver_time_in_seconds = FLAGS_max_solver_time;
+ if (FLAGS_trust_region_strategy == "lm") {
+ options->trust_region_strategy_type = LEVENBERG_MARQUARDT;
+ } else if (FLAGS_trust_region_strategy == "dogleg") {
+ options->trust_region_strategy_type = DOGLEG;
+ } else {
+ LOG(FATAL) << "Unknown trust region strategy: "
+ << FLAGS_trust_region_strategy;
+ }
}
void SetSolverOptionsFromFlags(BALProblem* bal_problem,
diff --git a/include/ceres/iteration_callback.h b/include/ceres/iteration_callback.h
index 6d7720c..29157d3 100644
--- a/include/ceres/iteration_callback.h
+++ b/include/ceres/iteration_callback.h
@@ -91,15 +91,15 @@
// Newton step.
int linear_solver_iterations;
- // TODO(sameeragarwal): Change the following two to use a higher
- // precision timer using clock_gettime.
- //
// Time (in seconds) spent inside the minimizer loop in the current
// iteration.
- int iteration_time_sec;
+ double iteration_time_in_seconds;
// Time (in seconds) spent inside the trust region step solver.
- int step_solver_time_sec;
+ double step_solver_time_in_seconds;
+
+ // Time (in seconds) since the user called Solve().
+ double cumulative_time_in_seconds;
};
// Interface for specifying callbacks that are executed at the end of
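As an illustration of the new cumulative_time_in_seconds field, a
hypothetical callback that enforces a soft time budget could look like the
sketch below. The class name and the budget are illustrative;
IterationCallback, IterationSummary and CallbackReturnType are the existing
Ceres types, and the callback would be registered through
Solver::Options::callbacks.

  #include "ceres/iteration_callback.h"

  class TimeBudgetCallback : public ceres::IterationCallback {
   public:
    explicit TimeBudgetCallback(double budget_in_seconds)
        : budget_in_seconds_(budget_in_seconds) {}

    virtual ceres::CallbackReturnType operator()(
        const ceres::IterationSummary& summary) {
      // Counted from the moment the user called Solve().
      if (summary.cumulative_time_in_seconds > budget_in_seconds_) {
        return ceres::SOLVER_TERMINATE_SUCCESSFULLY;
      }
      return ceres::SOLVER_CONTINUE;
    }

   private:
    const double budget_in_seconds_;
  };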
diff --git a/include/ceres/solver.h b/include/ceres/solver.h
index 12351ab..5ca15e9 100644
--- a/include/ceres/solver.h
+++ b/include/ceres/solver.h
@@ -59,7 +59,7 @@
Options() {
trust_region_strategy_type = LEVENBERG_MARQUARDT;
max_num_iterations = 50;
- max_solver_time_sec = 1e9;
+ max_solver_time_in_seconds = 1e9;
num_threads = 1;
initial_trust_region_radius = 1e4;
max_trust_region_radius = 1e16;
@@ -117,7 +117,7 @@
int max_num_iterations;
// Maximum time for which the minimizer should run for.
- double max_solver_time_sec;
+ double max_solver_time_in_seconds;
// Number of threads used by Ceres for evaluating the cost and
// jacobians.
@@ -377,8 +377,21 @@
int num_successful_steps;
int num_unsuccessful_steps;
+ // When the user calls Solve, before the actual optimization
+ // occurs, Ceres performs a number of preprocessing steps. These
+ // include error checks, memory allocations, and reorderings. This
+ // time is accounted for as preprocessing time.
double preprocessor_time_in_seconds;
+
+ // Time spent in the TrustRegionMinimizer.
double minimizer_time_in_seconds;
+
+ // After the Minimizer is finished, some time is spent re-evaluating
+ // residuals and the like. This time is accounted for as the
+ // postprocessor time.
+ double postprocessor_time_in_seconds;
+
+ // Sum total of all time spent inside Ceres when Solve is called.
double total_time_in_seconds;
// Preprocessor summary.
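The relationship between the four timing fields, sketched as a fragment to
run after Solve returns (assumes <iostream> is included and summary is the
Solver::Summary filled in by Solve; the printout itself is illustrative):

  // preprocessor + minimizer + postprocessor should account for
  // essentially all of total_time_in_seconds.
  std::cout << "preprocessor:  " << summary.preprocessor_time_in_seconds << " s\n"
            << "minimizer:     " << summary.minimizer_time_in_seconds << " s\n"
            << "postprocessor: " << summary.postprocessor_time_in_seconds << " s\n"
            << "total:         " << summary.total_time_in_seconds << " s\n";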
diff --git a/include/ceres/types.h b/include/ceres/types.h
index 705097f..0dcb354 100644
--- a/include/ceres/types.h
+++ b/include/ceres/types.h
@@ -158,8 +158,29 @@
PER_MINIMIZER_ITERATION
};
+// Ceres supports different strategies for computing the trust region
+// step.
enum TrustRegionStrategyType {
+ // The default trust region strategy is to use the step computation
+ // used in the Levenberg-Marquardt algorithm. For more details see
+ // levenberg_marquardt_strategy.h
LEVENBERG_MARQUARDT,
+
+ // Powell's dogleg algorithm interpolates between the Cauchy point
+ // and the Gauss-Newton step. It is particularly useful if the
+ // LEVENBERG_MARQUARDT algorithm is making a large number of
+ // unsuccessful steps. For more details see dogleg_strategy.h.
+ //
+ // NOTES:
+ //
+ // 1. This strategy has not been experimented with or tested as
+ // extensively as LEVENBERG_MARQUARDT, and therefore it should be
+ // considered EXPERIMENTAL for now.
+ //
+ // 2. For now this strategy should only be used with exact
+ // factorization based linear solvers, i.e., SPARSE_SCHUR,
+ // DENSE_SCHUR, DENSE_QR and SPARSE_NORMAL_CHOLESKY.
+ DOGLEG
};
enum SolverTerminationType {
@@ -261,7 +282,9 @@
LinearSolverTerminationType type);
const char* OrderingTypeToString(OrderingType type);
const char* SolverTerminationTypeToString(SolverTerminationType type);
-
+const char* SparseLinearAlgebraLibraryTypeToString(
+ SparseLinearAlgebraLibraryType type);
+const char* TrustRegionStrategyTypeToString(TrustRegionStrategyType type);
bool IsSchurType(LinearSolverType type);
} // namespace ceres
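The new stringification helper declared above can be used to report the
active configuration; a one-line illustrative use (assumes glog's LOG macro
and an options object are in scope):

  LOG(INFO) << "Trust region strategy: "
            << ceres::TrustRegionStrategyTypeToString(
                   options.trust_region_strategy_type);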
diff --git a/internal/ceres/CMakeLists.txt b/internal/ceres/CMakeLists.txt
index 432346f..076805f 100644
--- a/internal/ceres/CMakeLists.txt
+++ b/internal/ceres/CMakeLists.txt
@@ -48,6 +48,7 @@
dense_qr_solver.cc
dense_sparse_matrix.cc
detect_structure.cc
+ dogleg_strategy.cc
evaluator.cc
file.cc
gradient_checking_cost_function.cc
diff --git a/internal/ceres/dogleg_strategy.cc b/internal/ceres/dogleg_strategy.cc
new file mode 100644
index 0000000..0950c0e
--- /dev/null
+++ b/internal/ceres/dogleg_strategy.cc
@@ -0,0 +1,281 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2012 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+
+#include "ceres/dogleg_strategy.h"
+
+#include <cmath>
+#include "Eigen/Core"
+#include <glog/logging.h>
+#include "ceres/array_utils.h"
+#include "ceres/internal/eigen.h"
+#include "ceres/linear_solver.h"
+#include "ceres/sparse_matrix.h"
+#include "ceres/trust_region_strategy.h"
+#include "ceres/types.h"
+
+namespace ceres {
+namespace internal {
+namespace {
+const double kMaxMu = 1.0;
+const double kMinMu = 1e-8;
+}
+
+DoglegStrategy::DoglegStrategy(const TrustRegionStrategy::Options& options)
+ : linear_solver_(options.linear_solver),
+ radius_(options.initial_radius),
+ max_radius_(options.max_radius),
+ min_diagonal_(options.lm_min_diagonal),
+ max_diagonal_(options.lm_max_diagonal),
+ mu_(kMinMu),
+ min_mu_(kMinMu),
+ max_mu_(kMaxMu),
+ mu_increase_factor_(10.0),
+ increase_threshold_(0.75),
+ decrease_threshold_(0.25),
+ dogleg_step_norm_(0.0),
+ reuse_(false) {
+ CHECK_NOTNULL(linear_solver_);
+ CHECK_GT(min_diagonal_, 0.0);
+ CHECK_LT(min_diagonal_, max_diagonal_);
+ CHECK_GT(max_radius_, 0.0);
+}
+
+// If the reuse_ flag is not set, then the Cauchy point (scaled
+// gradient) and the new Gauss-Newton step are computed from
+// scratch. The Dogleg step is then computed as an interpolation of these
+// two vectors.
+LinearSolver::Summary DoglegStrategy::ComputeStep(
+ const TrustRegionStrategy::PerSolveOptions& per_solve_options,
+ SparseMatrix* jacobian,
+ const double* residuals,
+ double* step) {
+ CHECK_NOTNULL(jacobian);
+ CHECK_NOTNULL(residuals);
+ CHECK_NOTNULL(step);
+
+ const int n = jacobian->num_cols();
+ if (reuse_) {
+ // Gauss-Newton and gradient vectors are always available, only a
+ // new interpolant needs to be computed.
+ ComputeDoglegStep(step);
+ LinearSolver::Summary linear_solver_summary;
+ linear_solver_summary.num_iterations = 0;
+ linear_solver_summary.termination_type = TOLERANCE;
+ return linear_solver_summary;
+ }
+
+ reuse_ = true;
+ // Check that we have the storage needed to hold the various
+ // temporary vectors.
+ if (diagonal_.rows() != n) {
+ diagonal_.resize(n, 1);
+ gradient_.resize(n, 1);
+ gauss_newton_step_.resize(n, 1);
+ }
+
+ // Vector used to form the diagonal matrix that is used to
+ // regularize the Gauss-Newton solve.
+ jacobian->SquaredColumnNorm(diagonal_.data());
+ for (int i = 0; i < n; ++i) {
+ diagonal_[i] = min(max(diagonal_[i], min_diagonal_), max_diagonal_);
+ }
+
+ gradient_.setZero();
+ jacobian->LeftMultiply(residuals, gradient_.data());
+
+ // alpha * gradient is the Cauchy point.
+ Vector Jg(jacobian->num_rows());
+ Jg.setZero();
+ jacobian->RightMultiply(gradient_.data(), Jg.data());
+ alpha_ = gradient_.squaredNorm() / Jg.squaredNorm();
+
+ LinearSolver::Summary linear_solver_summary =
+ ComputeGaussNewtonStep(jacobian, residuals);
+
+ // Interpolate the Cauchy point and the Gauss-Newton step.
+ if (linear_solver_summary.termination_type != FAILURE) {
+ ComputeDoglegStep(step);
+ }
+
+ return linear_solver_summary;
+}
+
+void DoglegStrategy::ComputeDoglegStep(double* dogleg) {
+ VectorRef dogleg_step(dogleg, gradient_.rows());
+
+ // Case 1. The Gauss-Newton step lies inside the trust region, and
+ // is therefore the optimal solution to the trust-region problem.
+ const double gradient_norm = gradient_.norm();
+ const double gauss_newton_norm = gauss_newton_step_.norm();
+ if (gauss_newton_norm <= radius_) {
+ dogleg_step = gauss_newton_step_;
+ dogleg_step_norm_ = gauss_newton_norm;
+ VLOG(3) << "GaussNewton step size: " << dogleg_step_norm_
+ << " radius: " << radius_;
+ return;
+ }
+
+ // Case 2. The Cauchy point and the Gauss-Newton step lie outside
+ // the trust region. Rescale the Cauchy point to the trust region
+ // and return.
+ if (gradient_norm * alpha_ >= radius_) {
+ dogleg_step = (radius_ / gradient_norm) * gradient_;
+ dogleg_step_norm_ = radius_;
+ VLOG(3) << "Cauchy step size: " << dogleg_step_norm_
+ << " radius: " << radius_;
+ return;
+ }
+
+ // Case 3. The Cauchy point is inside the trust region and the
+ // Gauss-Newton step is outside. Compute the line joining the two
+ // points and the point on it which intersects the trust region
+ // boundary.
+
+ // a = alpha * gradient
+ // b = gauss_newton_step
+ const double b_dot_a = alpha_ * gradient_.dot(gauss_newton_step_);
+ const double a_squared_norm = pow(alpha_ * gradient_norm, 2.0);
+ const double b_minus_a_squared_norm =
+ a_squared_norm - 2 * b_dot_a + pow(gauss_newton_norm, 2);
+
+ // c = a' (b - a)
+ // = alpha * gradient' gauss_newton_step - alpha^2 |gradient|^2
+ const double c = b_dot_a - a_squared_norm;
+ const double d = sqrt(c * c + b_minus_a_squared_norm *
+ (pow(radius_, 2.0) - a_squared_norm));
+
+ double beta =
+ (c <= 0)
+ ? (d - c) / b_minus_a_squared_norm
+ : (radius_ * radius_ - a_squared_norm) / (d + c);
+ dogleg_step = (alpha_ * (1.0 - beta)) * gradient_ + beta * gauss_newton_step_;
+ dogleg_step_norm_ = dogleg_step.norm();
+ VLOG(3) << "Dogleg step size: " << dogleg_step_norm_
+ << " radius: " << radius_;
+}
+
+LinearSolver::Summary DoglegStrategy::ComputeGaussNewtonStep(
+ SparseMatrix* jacobian,
+ const double* residuals) {
+ const int n = jacobian->num_cols();
+ LinearSolver::Summary linear_solver_summary;
+ linear_solver_summary.termination_type = FAILURE;
+
+ // The Jacobian matrix is often quite poorly conditioned. Thus it is
+ // necessary to add a diagonal matrix at the bottom to prevent the
+ // linear solver from failing.
+ //
+ // We do this by computing the same diagonal matrix as the one used
+ // by Levenberg-Marquardt (other choices are possible), and scaling
+ // it by a small constant (independent of the trust region radius).
+ //
+ // If the solve fails, the multiplier to the diagonal is increased
+ // up to max_mu_ by a factor of mu_increase_factor_ every time. If
+ // the linear solver is still not successful, the strategy returns
+ // with FAILURE.
+ //
+ // Next time when a new Gauss-Newton step is requested, the
+ // multiplier starts out from the last successful solve.
+ //
+ // When a step is declared successful, the multiplier is decreased
+ // by a factor of half of mu_increase_factor_.
+ while (mu_ < max_mu_) {
+ // Dogleg, as far as I (sameeragarwal) understand it, requires a
+ // reasonably good estimate of the Gauss-Newton step. This means
+ // that we need to solve the normal equations more or less
+ // exactly. This is reflected in the values of the tolerances set
+ // below.
+ //
+ // For now, this strategy should only be used with exact
+ // factorization based solvers, for which these tolerances are
+ // automatically satisfied.
+ //
+ // The right way to combine inexact solves with trust region
+ // methods is to use Steihaug's method.
+ LinearSolver::PerSolveOptions solve_options;
+ solve_options.q_tolerance = 0.0;
+ solve_options.r_tolerance = 0.0;
+
+ lm_diagonal_ = (diagonal_ * mu_).array().sqrt();
+ solve_options.D = lm_diagonal_.data();
+
+ InvalidateArray(n, gauss_newton_step_.data());
+ linear_solver_summary = linear_solver_->Solve(jacobian,
+ residuals,
+ solve_options,
+ gauss_newton_step_.data());
+
+ if (linear_solver_summary.termination_type == FAILURE ||
+ !IsArrayValid(n, gauss_newton_step_.data())) {
+ mu_ *= mu_increase_factor_;
+ VLOG(2) << "Increasing mu " << mu_;
+ linear_solver_summary.termination_type = FAILURE;
+ continue;
+ }
+ break;
+ }
+
+ return linear_solver_summary;
+}
+
+void DoglegStrategy::StepAccepted(double step_quality) {
+ CHECK_GT(step_quality, 0.0);
+ if (step_quality < decrease_threshold_) {
+ radius_ *= 0.5;
+ return;
+ }
+
+ if (step_quality > increase_threshold_) {
+ radius_ = max(radius_, 3.0 * dogleg_step_norm_);
+ }
+
+ // Reduce the regularization multiplier, in the hope that whatever
+ // was causing the rank deficiency has gone away and we can return
+ // to doing a pure Gauss-Newton solve.
+ mu_ = max(min_mu_, 2.0 * mu_ / mu_increase_factor_ );
+ reuse_ = false;
+}
+
+void DoglegStrategy::StepRejected(double step_quality) {
+ radius_ *= 0.5;
+ reuse_ = true;
+}
+
+void DoglegStrategy::StepIsInvalid() {
+ mu_ *= mu_increase_factor_;
+ reuse_ = false;
+}
+
+double DoglegStrategy::Radius() const {
+ return radius_;
+}
+
+} // namespace internal
+} // namespace ceres
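The Case 3 interpolation above reduces to a scalar quadratic in the
interpolation parameter beta. For reference, a minimal standalone sketch of
that computation; the helper name and its argument list are illustrative
and not part of the patch:

  #include <cmath>

  // Find beta in [0, 1] such that ||a + beta * (b - a)|| == radius, where
  // a = alpha * gradient (the Cauchy point) and b = gauss_newton_step.
  // Expanding gives the quadratic
  //   ||b - a||^2 beta^2 + 2 a'(b - a) beta + (||a||^2 - radius^2) = 0.
  // With c = a'(b - a) and d = sqrt(c^2 + ||b - a||^2 (radius^2 - ||a||^2)),
  // the positive root is (d - c) / ||b - a||^2, which is algebraically
  // equal to (radius^2 - ||a||^2) / (d + c); the second form is used when
  // c > 0 to avoid cancellation.
  double SolveForBeta(double a_squared_norm,
                      double b_minus_a_squared_norm,
                      double c,
                      double radius) {
    const double d = std::sqrt(c * c + b_minus_a_squared_norm *
                               (radius * radius - a_squared_norm));
    return (c <= 0.0)
        ? (d - c) / b_minus_a_squared_norm
        : (radius * radius - a_squared_norm) / (d + c);
  }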
diff --git a/internal/ceres/dogleg_strategy.h b/internal/ceres/dogleg_strategy.h
new file mode 100644
index 0000000..17eb075
--- /dev/null
+++ b/internal/ceres/dogleg_strategy.h
@@ -0,0 +1,130 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2012 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+
+#ifndef CERES_INTERNAL_DOGLEG_STRATEGY_H_
+#define CERES_INTERNAL_DOGLEG_STRATEGY_H_
+
+#include "ceres/linear_solver.h"
+#include "ceres/trust_region_strategy.h"
+
+namespace ceres {
+namespace internal {
+
+// Dogleg step computation and trust region sizing strategy based on
+// on "Methods for Nonlinear Least Squares" by K. Madsen, H.B. Nielsen
+// and O. Tingleff. Available to download from
+//
+// http://www2.imm.dtu.dk/pubdb/views/edoc_download.php/3215/pdf/imm3215.pdf
+//
+// One minor modification is that instead of computing the pure
+// Gauss-Newton step, we compute a regularized version of it. This is
+// because the Jacobian is often rank-deficient and in such cases
+// using a direct solver leads to numerical failure.
+class DoglegStrategy : public TrustRegionStrategy {
+ public:
+ DoglegStrategy(const TrustRegionStrategy::Options& options);
+ virtual ~DoglegStrategy() {}
+
+ // TrustRegionStrategy interface
+ virtual LinearSolver::Summary ComputeStep(
+ const TrustRegionStrategy::PerSolveOptions& per_solve_options,
+ SparseMatrix* jacobian,
+ const double* residuals,
+ double* step);
+ virtual void StepAccepted(double step_quality);
+ virtual void StepRejected(double step_quality);
+ virtual void StepIsInvalid();
+
+ virtual double Radius() const;
+
+ private:
+ void ComputeCauchyStep();
+ LinearSolver::Summary ComputeGaussNewtonStep(SparseMatrix* jacobian,
+ const double* residuals);
+ void ComputeDoglegStep(double* step);
+
+ LinearSolver* linear_solver_;
+ double radius_;
+ const double max_radius_;
+
+ const double min_diagonal_;
+ const double max_diagonal_;
+
+ // mu is used to scale the diagonal matrix used to make the
+ // Gauss-Newton solve full rank. In each solve, the strategy starts
+ // out with mu = min_mu, and tries values up to max_mu. If the user
+ // reports an invalid step, the value of mu_ is increased so that
+ // the next solve starts with a stronger regularization.
+ //
+ // If a successful step is reported, then the value of mu_ is
+ // decreased with a lower bound of min_mu_.
+ double mu_;
+ const double min_mu_;
+ const double max_mu_;
+ const double mu_increase_factor_;
+ const double increase_threshold_;
+ const double decrease_threshold_;
+
+ Vector diagonal_;
+ Vector lm_diagonal_;
+
+ Vector gradient_;
+ Vector gauss_newton_step_;
+
+ // cauchy_step = alpha * gradient
+ double alpha_;
+ double dogleg_step_norm_;
+
+ // When ComputeStep is called, reuse_ indicates whether the
+ // Gauss-Newton and Cauchy steps from the last call to ComputeStep
+ // can be reused or not.
+ //
+ // If the user called StepAccepted, then it is expected that the
+ // user has recomputed the Jacobian matrix and a new Gauss-Newton
+ // solve is needed, so reuse is set to false.
+ //
+ // If the user called StepRejected, then it is expected that the
+ // user wants to solve the trust region problem with the same matrix
+ // but a different trust region radius, so the Gauss-Newton and
+ // Cauchy steps can be reused to compute the Dogleg step, and reuse
+ // is set to true.
+ //
+ // If the user called StepIsInvalid, then there was a numerical
+ // problem with the step computed in the last call to ComputeStep,
+ // and the regularization used to do the Gauss-Newton solve is
+ // increased and a new solve should be done when ComputeStep is
+ // called again, thus reuse is set to false.
+ bool reuse_;
+};
+
+} // namespace internal
+} // namespace ceres
+
+#endif // CERES_INTERNAL_DOGLEG_STRATEGY_H_
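To make the reuse_ contract above concrete, here is a much-simplified
sketch of the calling pattern expected from the minimizer. The helper
functions (StepIsNumericallyValid, EvaluateRelativeDecrease, ApplyStep) and
the loop structure are illustrative only; the real loop lives in
trust_region_minimizer.cc.

  // Illustrative caller loop (heavily simplified).
  while (!converged) {
    LinearSolver::Summary step_summary = strategy->ComputeStep(
        per_solve_options, jacobian, residuals, step.data());
    if (step_summary.termination_type == FAILURE ||
        !StepIsNumericallyValid(step)) {
      strategy->StepIsInvalid();   // stronger regularization; reuse_ = false
      continue;                    // same radius, call ComputeStep again
    }
    const double relative_decrease = EvaluateRelativeDecrease(step);
    if (relative_decrease > 0.0) {
      // Jacobian will be re-evaluated; reuse_ = false.
      strategy->StepAccepted(relative_decrease);
      ApplyStep(step);
    } else {
      // Same Jacobian, smaller radius; reuse_ = true.
      strategy->StepRejected(relative_decrease);
    }
  }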
diff --git a/internal/ceres/levenberg_marquardt_strategy.cc b/internal/ceres/levenberg_marquardt_strategy.cc
index 54ff783..81070e8 100644
--- a/internal/ceres/levenberg_marquardt_strategy.cc
+++ b/internal/ceres/levenberg_marquardt_strategy.cc
@@ -1,3 +1,33 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2012 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+
#include "ceres/levenberg_marquardt_strategy.h"
#include <cmath>
diff --git a/internal/ceres/minimizer.h b/internal/ceres/minimizer.h
index eeda298..e15b165 100644
--- a/internal/ceres/minimizer.h
+++ b/internal/ceres/minimizer.h
@@ -60,7 +60,7 @@
void Init(const Solver::Options& options) {
max_num_iterations = options.max_num_iterations;
- max_solver_time_sec = options.max_solver_time_sec;
+ max_solver_time_in_seconds = options.max_solver_time_in_seconds;
max_step_solver_retries = 5;
gradient_tolerance = options.gradient_tolerance;
parameter_tolerance = options.parameter_tolerance;
@@ -81,7 +81,7 @@
}
int max_num_iterations;
- int max_solver_time_sec;
+ double max_solver_time_in_seconds;
// Number of times the linear solver should be retried in case of
// numerical failure. The retries are done by exponentially scaling up
diff --git a/internal/ceres/solver.cc b/internal/ceres/solver.cc
index f5f5d91..901e543 100644
--- a/internal/ceres/solver.cc
+++ b/internal/ceres/solver.cc
@@ -49,8 +49,6 @@
time_t start_time_seconds = time(NULL);
internal::SolverImpl::Solve(options, problem, summary);
summary->total_time_in_seconds = time(NULL) - start_time_seconds;
- summary->preprocessor_time_in_seconds =
- summary->total_time_in_seconds - summary->minimizer_time_in_seconds;
}
void Solve(const Solver::Options& options,
@@ -59,8 +57,6 @@
time_t start_time_seconds = time(NULL);
internal::SolverImpl::Solve(options, problem, summary);
summary->total_time_in_seconds = time(NULL) - start_time_seconds;
- summary->preprocessor_time_in_seconds =
- summary->total_time_in_seconds - summary->minimizer_time_in_seconds;
}
Solver::Summary::Summary()
@@ -74,6 +70,7 @@
num_unsuccessful_steps(-1),
preprocessor_time_in_seconds(-1.0),
minimizer_time_in_seconds(-1.0),
+ postprocessor_time_in_seconds(-1.0),
total_time_in_seconds(-1.0),
num_parameter_blocks(-1),
num_parameters(-1),
diff --git a/internal/ceres/solver_impl.cc b/internal/ceres/solver_impl.cc
index fde5811..41b6d75 100644
--- a/internal/ceres/solver_impl.cc
+++ b/internal/ceres/solver_impl.cc
@@ -97,7 +97,7 @@
CallbackReturnType operator()(const IterationSummary& summary) {
const char* kReportRowFormat =
"% 4d: f:% 8e d:% 3.2e g:% 3.2e h:% 3.2e "
- "rho:% 3.2e mu:% 3.2e li:% 3d";
+ "rho:% 3.2e mu:% 3.2e li:% 3d it:% 3.2e tt:% 3.2e";
string output = StringPrintf(kReportRowFormat,
summary.iteration,
summary.cost,
@@ -106,7 +106,9 @@
summary.step_norm,
summary.relative_decrease,
summary.trust_region_radius,
- summary.linear_solver_iterations);
+ summary.linear_solver_iterations,
+ summary.iteration_time_in_seconds,
+ summary.cumulative_time_in_seconds);
if (log_to_stdout_) {
cout << output << endl;
} else {
@@ -164,6 +166,7 @@
void SolverImpl::Solve(const Solver::Options& original_options,
Problem* problem,
Solver::Summary* summary) {
+ time_t solver_start_time = time(NULL);
Solver::Options options(original_options);
#ifndef CERES_USE_OPENMP
@@ -273,6 +276,10 @@
// Collect the discontiguous parameters into a contiguous state vector.
reduced_program->ParameterBlocksToStateVector(parameters.data());
+ time_t minimizer_start_time = time(NULL);
+ summary->preprocessor_time_in_seconds =
+ minimizer_start_time - solver_start_time;
+
// Run the optimization.
Minimize(options,
reduced_program.get(),
@@ -289,6 +296,7 @@
return;
}
+ time_t post_process_start_time = time(NULL);
// Push the contiguous optimized parameters back to the user's parameters.
reduced_program->StateVectorToParameterBlocks(parameters.data());
reduced_program->CopyParameterBlockStateToUserState();
@@ -301,6 +309,9 @@
: NULL);
// Stick a fork in it, we're done.
+ time_t post_process_end_time = time(NULL);
+ summary->postprocessor_time_in_seconds =
+ post_process_end_time - post_process_start_time;
return;
}
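The accounting added above splits the wall-clock time into three buckets. A
skeleton of the bookkeeping, kept in whole seconds because time(NULL) only
has one-second resolution; the comments summarize what the surrounding code
does, and minimizer_time_in_seconds is assumed to be filled inside Minimize:

  time_t solver_start_time = time(NULL);
  // ... preprocessing: option checks, memory allocation, reordering ...
  time_t minimizer_start_time = time(NULL);
  summary->preprocessor_time_in_seconds =
      minimizer_start_time - solver_start_time;
  // ... Minimize() runs and records summary->minimizer_time_in_seconds ...
  time_t post_process_start_time = time(NULL);
  // ... copy optimized state back to the user, evaluate the final cost ...
  summary->postprocessor_time_in_seconds =
      time(NULL) - post_process_start_time;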
diff --git a/internal/ceres/trust_region_minimizer.cc b/internal/ceres/trust_region_minimizer.cc
index 6030f95..dd4a8a7 100644
--- a/internal/ceres/trust_region_minimizer.cc
+++ b/internal/ceres/trust_region_minimizer.cc
@@ -158,7 +158,7 @@
// something similar across the board.
iteration_summary.eta = options_.eta;
iteration_summary.linear_solver_iterations = 0;
- iteration_summary.step_solver_time_sec = 0;
+ iteration_summary.step_solver_time_in_seconds = 0;
// Do initial cost and Jacobian evaluation.
double cost = 0.0;
@@ -209,9 +209,13 @@
return;
}
- // Call the various callbacks.
- iteration_summary.iteration_time_sec = time(NULL) - iteration_start_time;
+ iteration_summary.iteration_time_in_seconds =
+ time(NULL) - iteration_start_time;
+ iteration_summary.cumulative_time_in_seconds = time(NULL) - start_time +
+ summary->preprocessor_time_in_seconds;
summary->iterations.push_back(iteration_summary);
+
+ // Call the various callbacks.
switch (RunCallbacks(options_, iteration_summary)) {
case SOLVER_TERMINATE_SUCCESSFULLY:
summary->termination_type = USER_SUCCESS;
@@ -236,7 +240,9 @@
break;
}
- if ((start_time - iteration_start_time) >= options_.max_solver_time_sec) {
+ const double total_solver_time = iteration_start_time - start_time +
+ summary->preprocessor_time_in_seconds;
+ if (total_solver_time >= options_.max_solver_time_in_seconds) {
summary->termination_type = NO_CONVERGENCE;
VLOG(1) << "Terminating: Maximum solver time reached.";
break;
@@ -257,7 +263,8 @@
residuals.data(),
trust_region_step.data());
- iteration_summary.step_solver_time_sec = time(NULL) - strategy_start_time;
+ iteration_summary.step_solver_time_in_seconds =
+ time(NULL) - strategy_start_time;
iteration_summary.linear_solver_iterations =
strategy_summary.num_iterations;
@@ -365,7 +372,8 @@
options_.function_tolerance * cost;
if (fabs(iteration_summary.cost_change) < absolute_function_tolerance) {
VLOG(1) << "Terminating. Function tolerance reached. "
- << "|cost_change|/cost: " << fabs(iteration_summary.cost_change) / cost
+ << "|cost_change|/cost: "
+ << fabs(iteration_summary.cost_change) / cost
<< " <= " << options_.function_tolerance;
summary->termination_type = FUNCTION_TOLERANCE;
return;
@@ -409,7 +417,11 @@
}
} else {
++summary->num_unsuccessful_steps;
- strategy->StepRejected(iteration_summary.relative_decrease);
+ if (iteration_summary.step_is_valid) {
+ strategy->StepRejected(iteration_summary.relative_decrease);
+ } else {
+ strategy->StepIsInvalid();
+ }
}
iteration_summary.cost = cost + summary->fixed_cost;
@@ -421,8 +433,12 @@
return;
}
- iteration_summary.iteration_time_sec = time(NULL) - iteration_start_time;
+ iteration_summary.iteration_time_in_seconds =
+ time(NULL) - iteration_start_time;
+ iteration_summary.cumulative_time_in_seconds = time(NULL) - start_time +
+ summary->preprocessor_time_in_seconds;
summary->iterations.push_back(iteration_summary);
+
switch (RunCallbacks(options_, iteration_summary)) {
case SOLVER_TERMINATE_SUCCESSFULLY:
summary->termination_type = USER_SUCCESS;
diff --git a/internal/ceres/trust_region_strategy.cc b/internal/ceres/trust_region_strategy.cc
index 5c69bd4..2e058b2 100644
--- a/internal/ceres/trust_region_strategy.cc
+++ b/internal/ceres/trust_region_strategy.cc
@@ -1,4 +1,5 @@
#include "ceres/trust_region_strategy.h"
+#include "ceres/dogleg_strategy.h"
#include "ceres/levenberg_marquardt_strategy.h"
namespace ceres {
@@ -10,6 +11,8 @@
switch (options.trust_region_strategy_type) {
case LEVENBERG_MARQUARDT:
return new LevenbergMarquardtStrategy(options);
+ case DOGLEG:
+ return new DoglegStrategy(options);
default:
LOG(FATAL) << "Unknown trust region strategy: "
<< options.trust_region_strategy_type;
diff --git a/internal/ceres/trust_region_strategy.h b/internal/ceres/trust_region_strategy.h
index 2463d64..b15a895 100644
--- a/internal/ceres/trust_region_strategy.h
+++ b/internal/ceres/trust_region_strategy.h
@@ -66,7 +66,9 @@
double max_radius;
// Minimum and maximum values of the diagonal damping matrix used
- // by LevenbergMarquardtStrategy.
+ // by LevenbergMarquardtStrategy. The DoglegStrategy also uses
+ // these bounds to construct a regularizing diagonal to ensure
+ // that the Gauss-Newton step computation is of full rank.
double lm_min_diagonal;
double lm_max_diagonal;
};
diff --git a/internal/ceres/types.cc b/internal/ceres/types.cc
index c6c4dea..18fefad 100644
--- a/internal/ceres/types.cc
+++ b/internal/ceres/types.cc
@@ -97,6 +97,26 @@
}
}
+const char* SparseLinearAlgebraLibraryTypeToString(
+ SparseLinearAlgebraLibraryType sparse_linear_algebra_library_type) {
+ switch (sparse_linear_algebra_library_type) {
+ CASESTR(CX_SPARSE);
+ CASESTR(SUITE_SPARSE);
+ default:
+ return "UNKNOWN";
+ }
+}
+
+const char* TrustRegionStrategyTypeToString(
+ TrustRegionStrategyType trust_region_strategy_type) {
+ switch (trust_region_strategy_type) {
+ CASESTR(LEVENBERG_MARQUARDT);
+ CASESTR(DOGLEG);
+ default:
+ return "UNKNOWN";
+ }
+}
+
#undef CASESTR
bool IsSchurType(LinearSolverType type) {
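These switch bodies rely on the CASESTR macro defined earlier in types.cc.
Assuming the usual stringizing definition (quoted from memory, worth
checking against the file), each branch expands as shown:

  // Assumed definition near the top of types.cc:
  //   #define CASESTR(x) case x: return #x
  //
  // so CASESTR(DOGLEG) expands to
  //   case DOGLEG: return "DOGLEG";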