Problem::Evaluate implementation.

1. Add Problem::Evaluate and tests.
2. Remove Solver::Summary::initial/final_*
3. Remove Solver::Options::return_* members.
4. Various cpplint cleanups.

Change-Id: I4266de53489896f72d9c6798c5efde6748d68a47
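For context, a minimal usage sketch of the new API (not part of the patch; the problem setup and variable names are hypothetical, but the signature follows the Problem::Evaluate declaration added below):

    #include <vector>
    #include "ceres/crs_matrix.h"
    #include "ceres/problem.h"

    // Evaluate the whole problem at its current parameter values.
    // Any of the output arguments may be passed as NULL if it is not needed.
    void EvaluateWholeProblem(ceres::Problem* problem) {
      double cost = 0.0;
      std::vector<double> residuals;
      std::vector<double> gradient;
      ceres::CRSMatrix jacobian;
      problem->Evaluate(ceres::Problem::EvaluateOptions(),
                        &cost,
                        &residuals,
                        &gradient,
                        &jacobian);
    }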
diff --git a/internal/ceres/array_utils.h b/internal/ceres/array_utils.h index 99cc8d8..742f439 100644 --- a/internal/ceres/array_utils.h +++ b/internal/ceres/array_utils.h
@@ -28,7 +28,7 @@ // // Author: sameeragarwal@google.com (Sameer Agarwal) // -// Utility routines for validating arrays. +// Utility routines for validating arrays. // // These are useful for detecting two common class of errors. //
diff --git a/internal/ceres/autodiff_test.cc b/internal/ceres/autodiff_test.cc index 1c56c22..23f9e9d 100644 --- a/internal/ceres/autodiff_test.cc +++ b/internal/ceres/autodiff_test.cc
@@ -372,7 +372,8 @@ const double kTolerance = 1e-14; for (int i = 0; i < num_residuals; ++i) { EXPECT_NEAR(J_x[2 * i + 0], i * x[1] * x[1], kTolerance) << "i: " << i; - EXPECT_NEAR(J_x[2 * i + 1], 2 * i * x[0] * x[1], kTolerance) << "i: " << i; + EXPECT_NEAR(J_x[2 * i + 1], 2 * i * x[0] * x[1], kTolerance) + << "i: " << i; } } }
diff --git a/internal/ceres/block_jacobi_preconditioner.cc b/internal/ceres/block_jacobi_preconditioner.cc index 5785e5d..1d5f9d7 100644 --- a/internal/ceres/block_jacobi_preconditioner.cc +++ b/internal/ceres/block_jacobi_preconditioner.cc
@@ -40,7 +40,8 @@ namespace ceres { namespace internal { -BlockJacobiPreconditioner::BlockJacobiPreconditioner(const BlockSparseMatrixBase& A) +BlockJacobiPreconditioner::BlockJacobiPreconditioner( + const BlockSparseMatrixBase& A) : num_rows_(A.num_rows()), block_structure_(*A.block_structure()) { // Calculate the amount of storage needed. @@ -65,7 +66,8 @@ BlockJacobiPreconditioner::~BlockJacobiPreconditioner() {} -bool BlockJacobiPreconditioner::Update(const BlockSparseMatrixBase& A, const double* D) { +bool BlockJacobiPreconditioner::Update(const BlockSparseMatrixBase& A, + const double* D) { const CompressedRowBlockStructure* bs = A.block_structure(); // Compute the diagonal blocks by block inner products. @@ -104,7 +106,8 @@ MatrixRef block(blocks_[c], size, size); if (D != NULL) { - block.diagonal() += ConstVectorRef(D + position, size).array().square().matrix(); + block.diagonal() += + ConstVectorRef(D + position, size).array().square().matrix(); } block = block.selfadjointView<Eigen::Upper>() @@ -114,7 +117,8 @@ return true; } -void BlockJacobiPreconditioner::RightMultiply(const double* x, double* y) const { +void BlockJacobiPreconditioner::RightMultiply(const double* x, + double* y) const { for (int c = 0; c < block_structure_.cols.size(); ++c) { const int size = block_structure_.cols[c].size; const int position = block_structure_.cols[c].position;
diff --git a/internal/ceres/canonical_views_clustering.h b/internal/ceres/canonical_views_clustering.h index 171ac55..5f8e4e3 100644 --- a/internal/ceres/canonical_views_clustering.h +++ b/internal/ceres/canonical_views_clustering.h
@@ -43,11 +43,11 @@ #include <vector> -#include <glog/logging.h> #include "ceres/collections_port.h" #include "ceres/graph.h" -#include "ceres/map_util.h" #include "ceres/internal/macros.h" +#include "ceres/map_util.h" +#include "glog/logging.h" namespace ceres { namespace internal {
diff --git a/internal/ceres/cgnr_solver.h b/internal/ceres/cgnr_solver.h index f61c8d1..d560a9d 100644 --- a/internal/ceres/cgnr_solver.h +++ b/internal/ceres/cgnr_solver.h
@@ -51,10 +51,11 @@ class CgnrSolver : public BlockSparseMatrixBaseSolver { public: explicit CgnrSolver(const LinearSolver::Options& options); - virtual Summary SolveImpl(BlockSparseMatrixBase* A, - const double* b, - const LinearSolver::PerSolveOptions& per_solve_options, - double* x); + virtual Summary SolveImpl( + BlockSparseMatrixBase* A, + const double* b, + const LinearSolver::PerSolveOptions& per_solve_options, + double* x); private: const LinearSolver::Options options_;
diff --git a/internal/ceres/compressed_row_jacobian_writer.cc b/internal/ceres/compressed_row_jacobian_writer.cc index 912c484..bbadb77 100644 --- a/internal/ceres/compressed_row_jacobian_writer.cc +++ b/internal/ceres/compressed_row_jacobian_writer.cc
@@ -135,7 +135,8 @@ // Populate the row and column block vectors for use by block // oriented ordering algorithms. This is useful when // Solver::Options::use_block_amd = true. - const vector<ParameterBlock*>& parameter_blocks = program_->parameter_blocks(); + const vector<ParameterBlock*>& parameter_blocks = + program_->parameter_blocks(); vector<int>& col_blocks = *(jacobian->mutable_col_blocks()); col_blocks.resize(parameter_blocks.size()); for (int i = 0; i < parameter_blocks.size(); ++i) {
diff --git a/internal/ceres/compressed_row_sparse_matrix.h b/internal/ceres/compressed_row_sparse_matrix.h index 10f96c0..c9c904b 100644 --- a/internal/ceres/compressed_row_sparse_matrix.h +++ b/internal/ceres/compressed_row_sparse_matrix.h
@@ -32,13 +32,14 @@ #define CERES_INTERNAL_COMPRESSED_ROW_SPARSE_MATRIX_H_ #include <vector> -#include <glog/logging.h> -#include "ceres/sparse_matrix.h" -#include "ceres/triplet_sparse_matrix.h" + #include "ceres/internal/eigen.h" #include "ceres/internal/macros.h" #include "ceres/internal/port.h" +#include "ceres/sparse_matrix.h" +#include "ceres/triplet_sparse_matrix.h" #include "ceres/types.h" +#include "glog/logging.h" namespace ceres {
diff --git a/internal/ceres/coordinate_descent_minimizer.cc b/internal/ceres/coordinate_descent_minimizer.cc index 5ce110c..c4da987 100644 --- a/internal/ceres/coordinate_descent_minimizer.cc +++ b/internal/ceres/coordinate_descent_minimizer.cc
@@ -129,7 +129,8 @@ parameter_block->SetConstant(); } - scoped_array<LinearSolver*> linear_solvers(new LinearSolver*[options.num_threads]); + scoped_array<LinearSolver*> linear_solvers( + new LinearSolver*[options.num_threads]); LinearSolver::Options linear_solver_options; linear_solver_options.type = DENSE_QR;
diff --git a/internal/ceres/coordinate_descent_minimizer.h b/internal/ceres/coordinate_descent_minimizer.h index fb96b3c..3dcf8fa 100644 --- a/internal/ceres/coordinate_descent_minimizer.h +++ b/internal/ceres/coordinate_descent_minimizer.h
@@ -31,6 +31,8 @@ #ifndef CERES_INTERNAL_COORDINATE_DESCENT_MINIMIZER_H_ #define CERES_INTERNAL_COORDINATE_DESCENT_MINIMIZER_H_ +#include <vector> + #include "ceres/evaluator.h" #include "ceres/minimizer.h" #include "ceres/problem_impl.h"
diff --git a/internal/ceres/cost_function_to_functor_test.cc b/internal/ceres/cost_function_to_functor_test.cc index 8ee02bd..90ccc82 100644 --- a/internal/ceres/cost_function_to_functor_test.cc +++ b/internal/ceres/cost_function_to_functor_test.cc
@@ -1,3 +1,33 @@ +// Ceres Solver - A fast non-linear least squares minimizer +// Copyright 2013 Google Inc. All rights reserved. +// http://code.google.com/p/ceres-solver/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// * Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// * Neither the name of Google Inc. nor the names of its contributors may be +// used to endorse or promote products derived from this software without +// specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +// POSSIBILITY OF SUCH DAMAGE. +// +// Author: sameeragarwal@google.com (Sameer Agarwal) + #include "ceres/cost_function_to_functor.h" #include "ceres/autodiff_cost_function.h" #include "gtest/gtest.h" @@ -9,11 +39,15 @@ void ExpectCostFunctionsAreEqual(const CostFunction& cost_function, const CostFunction& actual_cost_function) { - EXPECT_EQ(cost_function.num_residuals(), actual_cost_function.num_residuals()); + EXPECT_EQ(cost_function.num_residuals(), + actual_cost_function.num_residuals()); const int num_residuals = cost_function.num_residuals(); - const vector<int16>& parameter_block_sizes = cost_function.parameter_block_sizes(); - const vector<int16>& actual_parameter_block_sizes = actual_cost_function.parameter_block_sizes(); - EXPECT_EQ(parameter_block_sizes.size(), actual_parameter_block_sizes.size()); + const vector<int16>& parameter_block_sizes = + cost_function.parameter_block_sizes(); + const vector<int16>& actual_parameter_block_sizes = + actual_cost_function.parameter_block_sizes(); + EXPECT_EQ(parameter_block_sizes.size(), + actual_parameter_block_sizes.size()); int num_parameters = 0; for (int i = 0; i < parameter_block_sizes.size(); ++i) { @@ -30,22 +64,29 @@ scoped_array<double> jacobians(new double[num_parameters * num_residuals]); scoped_array<double> actual_residuals(new double[num_residuals]); - scoped_array<double> actual_jacobians(new double[num_parameters * num_residuals]); + scoped_array<double> actual_jacobians + (new double[num_parameters * num_residuals]); - scoped_array<double*> parameter_blocks(new double*[parameter_block_sizes.size()]); - scoped_array<double*> jacobian_blocks(new double*[parameter_block_sizes.size()]); - scoped_array<double*> actual_jacobian_blocks(new double*[parameter_block_sizes.size()]); + scoped_array<double*> parameter_blocks( + new double*[parameter_block_sizes.size()]); + scoped_array<double*> jacobian_blocks( + new 
double*[parameter_block_sizes.size()]); + scoped_array<double*> actual_jacobian_blocks( + new double*[parameter_block_sizes.size()]); num_parameters = 0; for (int i = 0; i < parameter_block_sizes.size(); ++i) { parameter_blocks[i] = parameters.get() + num_parameters; jacobian_blocks[i] = jacobians.get() + num_parameters * num_residuals; - actual_jacobian_blocks[i] = actual_jacobians.get() + num_parameters * num_residuals; + actual_jacobian_blocks[i] = + actual_jacobians.get() + num_parameters * num_residuals; num_parameters += parameter_block_sizes[i]; } - EXPECT_TRUE(cost_function.Evaluate(parameter_blocks.get(), residuals.get(), NULL)); - EXPECT_TRUE(actual_cost_function.Evaluate(parameter_blocks.get(), actual_residuals.get(), NULL)); + EXPECT_TRUE(cost_function.Evaluate(parameter_blocks.get(), + residuals.get(), NULL)); + EXPECT_TRUE(actual_cost_function.Evaluate(parameter_blocks.get(), + actual_residuals.get(), NULL)); for (int i = 0; i < num_residuals; ++i) { EXPECT_NEAR(residuals[i], actual_residuals[i], kTolerance) << "residual id: " << i; @@ -65,7 +106,8 @@ for (int i = 0; i < num_residuals * num_parameters; ++i) { EXPECT_NEAR(jacobians[i], actual_jacobians[i], kTolerance) - << "jacobian : " << i << " " << jacobians[i] << " " << actual_jacobians[i]; + << "jacobian : " << i << " " + << jacobians[i] << " " << actual_jacobians[i]; } }; @@ -200,7 +242,7 @@ }; #define TEST_BODY(NAME) \ - TEST(CostFunctionToFunctor, NAME){ \ + TEST(CostFunctionToFunctor, NAME) { \ scoped_ptr<CostFunction> cost_function( \ new AutoDiffCostFunction< \ CostFunctionToFunctor<2, PARAMETER_BLOCK_SIZES >, \
diff --git a/internal/ceres/dense_qr_solver.cc b/internal/ceres/dense_qr_solver.cc index 21b6b15..1fb9709 100644 --- a/internal/ceres/dense_qr_solver.cc +++ b/internal/ceres/dense_qr_solver.cc
@@ -63,7 +63,8 @@ } // rhs = [b;0] to account for the additional rows in the lhs. - const int augmented_num_rows = num_rows + ((per_solve_options.D != NULL) ? num_cols : 0); + const int augmented_num_rows = + num_rows + ((per_solve_options.D != NULL) ? num_cols : 0); if (rhs_.rows() != augmented_num_rows) { rhs_.resize(augmented_num_rows); rhs_.setZero();
diff --git a/internal/ceres/dense_sparse_matrix.cc b/internal/ceres/dense_sparse_matrix.cc index e18a507..978ac6a 100644 --- a/internal/ceres/dense_sparse_matrix.cc +++ b/internal/ceres/dense_sparse_matrix.cc
@@ -47,7 +47,9 @@ m_.setZero(); } -DenseSparseMatrix::DenseSparseMatrix(int num_rows, int num_cols, bool reserve_diagonal) +DenseSparseMatrix::DenseSparseMatrix(int num_rows, + int num_cols, + bool reserve_diagonal) : has_diagonal_appended_(false), has_diagonal_reserved_(reserve_diagonal) { // Allocate enough space for the diagonal.
diff --git a/internal/ceres/dogleg_strategy.cc b/internal/ceres/dogleg_strategy.cc index da861fe..a330ad2 100644 --- a/internal/ceres/dogleg_strategy.cc +++ b/internal/ceres/dogleg_strategy.cc
@@ -87,7 +87,7 @@ // Gauss-Newton and gradient vectors are always available, only a // new interpolant need to be computed. For the subspace case, // the subspace and the two-dimensional model are also still valid. - switch(dogleg_type_) { + switch (dogleg_type_) { case TRADITIONAL_DOGLEG: ComputeTraditionalDoglegStep(step); break; @@ -135,7 +135,7 @@ summary.termination_type = linear_solver_summary.termination_type; if (linear_solver_summary.termination_type != FAILURE) { - switch(dogleg_type_) { + switch (dogleg_type_) { // Interpolate the Cauchy point and the Gauss-Newton step. case TRADITIONAL_DOGLEG: ComputeTraditionalDoglegStep(step); @@ -415,15 +415,15 @@ const double trB = subspace_B_.trace(); const double r2 = radius_ * radius_; Matrix2d B_adj; - B_adj << subspace_B_(1,1) , -subspace_B_(0,1), - -subspace_B_(1,0) , subspace_B_(0,0); + B_adj << subspace_B_(1, 1) , -subspace_B_(0, 1), + -subspace_B_(1, 0) , subspace_B_(0, 0); Vector polynomial(5); polynomial(0) = r2; polynomial(1) = 2.0 * r2 * trB; - polynomial(2) = r2 * ( trB * trB + 2.0 * detB ) - subspace_g_.squaredNorm(); - polynomial(3) = -2.0 * ( subspace_g_.transpose() * B_adj * subspace_g_ - - r2 * detB * trB ); + polynomial(2) = r2 * (trB * trB + 2.0 * detB) - subspace_g_.squaredNorm(); + polynomial(3) = -2.0 * (subspace_g_.transpose() * B_adj * subspace_g_ + - r2 * detB * trB); polynomial(4) = r2 * detB * detB - (B_adj * subspace_g_).squaredNorm(); return polynomial; @@ -598,7 +598,7 @@ // Reduce the regularization multiplier, in the hope that whatever // was causing the rank deficiency has gone away and we can return // to doing a pure Gauss-Newton solve. - mu_ = max(min_mu_, 2.0 * mu_ / mu_increase_factor_ ); + mu_ = max(min_mu_, 2.0 * mu_ / mu_increase_factor_); reuse_ = false; }
diff --git a/internal/ceres/dogleg_strategy.h b/internal/ceres/dogleg_strategy.h index bff1689..7131467 100644 --- a/internal/ceres/dogleg_strategy.h +++ b/internal/ceres/dogleg_strategy.h
@@ -53,8 +53,8 @@ // This finds the exact optimum over the two-dimensional subspace // spanned by the two Dogleg vectors. class DoglegStrategy : public TrustRegionStrategy { -public: - DoglegStrategy(const TrustRegionStrategy::Options& options); + public: + explicit DoglegStrategy(const TrustRegionStrategy::Options& options); virtual ~DoglegStrategy() {} // TrustRegionStrategy interface
diff --git a/internal/ceres/dogleg_strategy_test.cc b/internal/ceres/dogleg_strategy_test.cc index 24c526e..caa397e 100644 --- a/internal/ceres/dogleg_strategy_test.cc +++ b/internal/ceres/dogleg_strategy_test.cc
@@ -129,9 +129,9 @@ scoped_ptr<LinearSolver> linear_solver( new DenseQRSolver(LinearSolver::Options())); options_.linear_solver = linear_solver.get(); - // The global minimum is at (1, 1, ..., 1), so the distance to it is sqrt(6.0). - // By restricting the trust region to a radius of 2.0, we test if the trust - // region is actually obeyed. + // The global minimum is at (1, 1, ..., 1), so the distance to it is + // sqrt(6.0). By restricting the trust region to a radius of 2.0, + // we test if the trust region is actually obeyed. options_.dogleg_type = TRADITIONAL_DOGLEG; options_.initial_radius = 2.0; options_.max_radius = 2.0;
diff --git a/internal/ceres/evaluator.h b/internal/ceres/evaluator.h index 76cf1e0..14a8818 100644 --- a/internal/ceres/evaluator.h +++ b/internal/ceres/evaluator.h
@@ -32,6 +32,7 @@ #ifndef CERES_INTERNAL_EVALUATOR_H_ #define CERES_INTERNAL_EVALUATOR_H_ +#include <map> #include <string> #include <vector>
diff --git a/internal/ceres/evaluator_test.cc b/internal/ceres/evaluator_test.cc index a4e7b25..9e9d31b 100644 --- a/internal/ceres/evaluator_test.cc +++ b/internal/ceres/evaluator_test.cc
@@ -833,7 +833,11 @@ (i & 4) ? expected.jacobian : NULL); } - + // The Evaluate call should only depend on the parameter block + // values in the user provided pointers, and the current state of + // the parameter block should not matter. So, create a new + // parameters vector, and update the parameter block states with + // it. The results from the Evaluate call should not change. double new_parameters[6]; for (int i = 0; i < 6; ++i) { new_parameters[i] = 0.0;
diff --git a/internal/ceres/execution_summary.h b/internal/ceres/execution_summary.h index 2eb3c1a..29bdc69 100644 --- a/internal/ceres/execution_summary.h +++ b/internal/ceres/execution_summary.h
@@ -56,8 +56,8 @@ calls_[name] += 1; } - const map<string, double>& times() const { return times_; }; - const map<string, int>& calls() const { return calls_; }; + const map<string, double>& times() const { return times_; } + const map<string, int>& calls() const { return calls_; } private: Mutex times_mutex_;
diff --git a/internal/ceres/gradient_checker_test.cc b/internal/ceres/gradient_checker_test.cc index cf7ee20..fa0d841 100644 --- a/internal/ceres/gradient_checker_test.cc +++ b/internal/ceres/gradient_checker_test.cc
@@ -34,11 +34,11 @@ #include <cmath> #include <cstdlib> -#include <glog/logging.h> #include <vector> #include "ceres/cost_function.h" #include "ceres/random.h" +#include "glog/logging.h" #include "gtest/gtest.h" namespace ceres {
diff --git a/internal/ceres/graph.h b/internal/ceres/graph.h index e080489..5f92d4d 100644 --- a/internal/ceres/graph.h +++ b/internal/ceres/graph.h
@@ -32,12 +32,13 @@ #define CERES_INTERNAL_GRAPH_H_ #include <limits> -#include <glog/logging.h> +#include <utility> #include "ceres/integral_types.h" #include "ceres/map_util.h" #include "ceres/collections_port.h" #include "ceres/internal/macros.h" #include "ceres/types.h" +#include "glog/logging.h" namespace ceres { namespace internal {
diff --git a/internal/ceres/graph_algorithms.h b/internal/ceres/graph_algorithms.h index 3b42d93..2e6eec0 100644 --- a/internal/ceres/graph_algorithms.h +++ b/internal/ceres/graph_algorithms.h
@@ -33,10 +33,12 @@ #ifndef CERES_INTERNAL_GRAPH_ALGORITHMS_H_ #define CERES_INTERNAL_GRAPH_ALGORITHMS_H_ +#include <algorithm> #include <vector> -#include <glog/logging.h> +#include <utility> #include "ceres/collections_port.h" #include "ceres/graph.h" +#include "glog/logging.h" namespace ceres { namespace internal {
diff --git a/internal/ceres/graph_algorithms_test.cc b/internal/ceres/graph_algorithms_test.cc index d5d4843..78a0452 100644 --- a/internal/ceres/graph_algorithms_test.cc +++ b/internal/ceres/graph_algorithms_test.cc
@@ -181,7 +181,7 @@ VertexDegreeLessThan<int> less_than(graph); for (int i = 0; i < 4; ++i) { - EXPECT_FALSE(less_than(i,i)) << "Failing vertex: " << i; + EXPECT_FALSE(less_than(i, i)) << "Failing vertex: " << i; for (int j = 0; j < 4; ++j) { if (i != j) { EXPECT_TRUE(less_than(i, j) ^ less_than(j, i))
diff --git a/internal/ceres/iterative_schur_complement_solver.cc b/internal/ceres/iterative_schur_complement_solver.cc index d352d02..cf5562a 100644 --- a/internal/ceres/iterative_schur_complement_solver.cc +++ b/internal/ceres/iterative_schur_complement_solver.cc
@@ -117,14 +117,16 @@ case SCHUR_JACOBI: if (preconditioner_.get() == NULL) { preconditioner_.reset( - new SchurJacobiPreconditioner(*A->block_structure(), preconditioner_options)); + new SchurJacobiPreconditioner( + *A->block_structure(), preconditioner_options)); } break; case CLUSTER_JACOBI: case CLUSTER_TRIDIAGONAL: if (preconditioner_.get() == NULL) { preconditioner_.reset( - new VisibilityBasedPreconditioner(*A->block_structure(), preconditioner_options)); + new VisibilityBasedPreconditioner( + *A->block_structure(), preconditioner_options)); } break; default:
diff --git a/internal/ceres/levenberg_marquardt_strategy.h b/internal/ceres/levenberg_marquardt_strategy.h index 90c2178..344e328 100644 --- a/internal/ceres/levenberg_marquardt_strategy.h +++ b/internal/ceres/levenberg_marquardt_strategy.h
@@ -43,8 +43,9 @@ // // http://www2.imm.dtu.dk/pubdb/views/edoc_download.php/3215/pdf/imm3215.pdf class LevenbergMarquardtStrategy : public TrustRegionStrategy { -public: - LevenbergMarquardtStrategy(const TrustRegionStrategy::Options& options); + public: + explicit LevenbergMarquardtStrategy( + const TrustRegionStrategy::Options& options); virtual ~LevenbergMarquardtStrategy(); // TrustRegionStrategy interface
diff --git a/internal/ceres/levenberg_marquardt_strategy_test.cc b/internal/ceres/levenberg_marquardt_strategy_test.cc index 0ce44c4..a5ce6a0 100644 --- a/internal/ceres/levenberg_marquardt_strategy_test.cc +++ b/internal/ceres/levenberg_marquardt_strategy_test.cc
@@ -58,7 +58,7 @@ diagonal_(diagonal) { } - virtual ~RegularizationCheckingLinearSolver(){} + virtual ~RegularizationCheckingLinearSolver() {} private: virtual LinearSolver::Summary SolveImpl( @@ -111,12 +111,12 @@ } TEST(LevenbergMarquardtStrategy, CorrectDiagonalToLinearSolver) { - Matrix jacobian(2,3); + Matrix jacobian(2, 3); jacobian.setZero(); - jacobian(0,0) = 0.0; - jacobian(0,1) = 1.0; - jacobian(1,1) = 1.0; - jacobian(0,2) = 100.0; + jacobian(0, 0) = 0.0; + jacobian(0, 1) = 1.0; + jacobian(1, 1) = 1.0; + jacobian(0, 2) = 100.0; double residual = 1.0; double x[3]; @@ -148,7 +148,8 @@ EXPECT_CALL(log, Log(WARNING, _, HasSubstr("Failed to compute a finite step."))); - TrustRegionStrategy::Summary summary = lms.ComputeStep(pso, &dsm, &residual, x); + TrustRegionStrategy::Summary summary = + lms.ComputeStep(pso, &dsm, &residual, x); EXPECT_EQ(summary.termination_type, FAILURE); } }
diff --git a/internal/ceres/line_search_direction.cc b/internal/ceres/line_search_direction.cc index f441607..2f27a78 100644 --- a/internal/ceres/line_search_direction.cc +++ b/internal/ceres/line_search_direction.cc
@@ -80,7 +80,8 @@ } *search_direction = -current.gradient + beta * previous.search_direction; - const double directional_derivative = current. gradient.dot(*search_direction); + const double directional_derivative = + current. gradient.dot(*search_direction); if (directional_derivative > -function_tolerance_) { LOG(WARNING) << "Restarting non-linear conjugate gradients: " << directional_derivative; @@ -120,7 +121,7 @@ }; LineSearchDirection* -LineSearchDirection::Create(LineSearchDirection::Options& options) { +LineSearchDirection::Create(const LineSearchDirection::Options& options) { if (options.type == STEEPEST_DESCENT) { return new SteepestDescent; }
diff --git a/internal/ceres/line_search_direction.h b/internal/ceres/line_search_direction.h index d92c362..71063ab 100644 --- a/internal/ceres/line_search_direction.h +++ b/internal/ceres/line_search_direction.h
@@ -56,16 +56,15 @@ int max_lbfgs_rank; }; - static LineSearchDirection* Create(Options& options); + static LineSearchDirection* Create(const Options& options); virtual ~LineSearchDirection() {} virtual bool NextDirection(const LineSearchMinimizer::State& previous, const LineSearchMinimizer::State& current, Vector* search_direction) = 0; - }; } // namespace internal } // namespace ceres -#endif // CERES_INTERNAL_LINE_SEARCH_DIRECTION_H_ +#endif // CERES_INTERNAL_LINE_SEARCH_DIRECTION_H_
diff --git a/internal/ceres/linear_least_squares_problems.cc b/internal/ceres/linear_least_squares_problems.cc index 3e3bcd0..bc6ff00 100644 --- a/internal/ceres/linear_least_squares_problems.cc +++ b/internal/ceres/linear_least_squares_problems.cc
@@ -734,7 +734,7 @@ } bool DumpLinearLeastSquaresProblem(const string& directory, - int iteration, + int iteration, DumpFormatType dump_format_type, const SparseMatrix* A, const double* D, @@ -742,18 +742,18 @@ const double* x, int num_eliminate_blocks) { switch (dump_format_type) { - case (CONSOLE): + case CONSOLE: return DumpLinearLeastSquaresProblemToConsole(directory, iteration, A, D, b, x, num_eliminate_blocks); - case (PROTOBUF): + case PROTOBUF: return DumpLinearLeastSquaresProblemToProtocolBuffer( directory, iteration, A, D, b, x, num_eliminate_blocks); - case (TEXTFILE): + case TEXTFILE: return DumpLinearLeastSquaresProblemToTextFile(directory, iteration, A, D, b, x,
diff --git a/internal/ceres/linear_least_squares_problems.h b/internal/ceres/linear_least_squares_problems.h index 553cc0d..c76ae91 100644 --- a/internal/ceres/linear_least_squares_problems.h +++ b/internal/ceres/linear_least_squares_problems.h
@@ -74,7 +74,7 @@ // Write the linear least squares problem to disk. The exact format // depends on dump_format_type. bool DumpLinearLeastSquaresProblem(const string& directory, - int iteration, + int iteration, DumpFormatType dump_format_type, const SparseMatrix* A, const double* D,
diff --git a/internal/ceres/linear_solver.h b/internal/ceres/linear_solver.h index 219939e..a980514 100644 --- a/internal/ceres/linear_solver.h +++ b/internal/ceres/linear_solver.h
@@ -35,17 +35,16 @@ #define CERES_INTERNAL_LINEAR_SOLVER_H_ #include <cstddef> +#include <map> #include <vector> - -#include <glog/logging.h> #include "ceres/block_sparse_matrix.h" #include "ceres/casts.h" #include "ceres/compressed_row_sparse_matrix.h" #include "ceres/dense_sparse_matrix.h" #include "ceres/execution_summary.h" -#include "ceres/execution_summary.h" #include "ceres/triplet_sparse_matrix.h" #include "ceres/types.h" +#include "glog/logging.h" namespace ceres { namespace internal {
diff --git a/internal/ceres/low_rank_inverse_hessian.h b/internal/ceres/low_rank_inverse_hessian.h index 5d59c14..6f3fc0c 100644 --- a/internal/ceres/low_rank_inverse_hessian.h +++ b/internal/ceres/low_rank_inverse_hessian.h
@@ -78,7 +78,7 @@ // LinearOperator interface virtual void RightMultiply(const double* x, double* y) const; virtual void LeftMultiply(const double* x, double* y) const { - RightMultiply(x,y); + RightMultiply(x, y); } virtual int num_rows() const { return num_parameters_; } virtual int num_cols() const { return num_parameters_; }
diff --git a/internal/ceres/map_util.h b/internal/ceres/map_util.h index ddf1252..929c6b3 100644 --- a/internal/ceres/map_util.h +++ b/internal/ceres/map_util.h
@@ -35,6 +35,7 @@ #include <utility> #include "ceres/internal/port.h" +#include "glog/logging.h" namespace ceres {
diff --git a/internal/ceres/minimizer.cc b/internal/ceres/minimizer.cc index 912ac99..2e2c15a 100644 --- a/internal/ceres/minimizer.cc +++ b/internal/ceres/minimizer.cc
@@ -35,7 +35,7 @@ namespace ceres { namespace internal { -Minimizer::~Minimizer() {}; +Minimizer::~Minimizer() {} bool Minimizer::RunCallbacks(const vector<IterationCallback*> callbacks, const IterationSummary& iteration_summary,
diff --git a/internal/ceres/numeric_diff_functor_test.cc b/internal/ceres/numeric_diff_functor_test.cc index a37ceaf..acd7ff9 100644 --- a/internal/ceres/numeric_diff_functor_test.cc +++ b/internal/ceres/numeric_diff_functor_test.cc
@@ -46,7 +46,8 @@ namespace internal { TEST(NumericDiffCostFunction, EasyCaseCentralDifferences) { - typedef NumericDiffFunctor<EasyFunctor, CENTRAL, 3, 5, 5> NumericDiffEasyFunctor; + typedef NumericDiffFunctor<EasyFunctor, CENTRAL, 3, 5, 5> + NumericDiffEasyFunctor; internal::scoped_ptr<CostFunction> cost_function; EasyFunctor functor; @@ -64,7 +65,8 @@ } TEST(NumericDiffCostFunction, EasyCaseForwardDifferences) { - typedef NumericDiffFunctor<EasyFunctor, FORWARD, 3, 5, 5> NumericDiffEasyFunctor; + typedef NumericDiffFunctor<EasyFunctor, FORWARD, 3, 5, 5> + NumericDiffEasyFunctor; internal::scoped_ptr<CostFunction> cost_function; EasyFunctor functor;
diff --git a/internal/ceres/numeric_diff_test_utils.cc b/internal/ceres/numeric_diff_test_utils.cc index 6786ac9..3572afa 100644 --- a/internal/ceres/numeric_diff_test_utils.cc +++ b/internal/ceres/numeric_diff_test_utils.cc
@@ -1,3 +1,33 @@ +// Ceres Solver - A fast non-linear least squares minimizer +// Copyright 2013 Google Inc. All rights reserved. +// http://code.google.com/p/ceres-solver/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// * Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// * Neither the name of Google Inc. nor the names of its contributors may be +// used to endorse or promote products derived from this software without +// specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +// POSSIBILITY OF SUCH DAMAGE. +// +// Author: sameeragarwal@google.com (Sameer Agarwal) + #include "ceres/numeric_diff_test_utils.h" #include <algorithm>
diff --git a/internal/ceres/numeric_diff_test_utils.h b/internal/ceres/numeric_diff_test_utils.h index 1a43df2..dd5f21e 100644 --- a/internal/ceres/numeric_diff_test_utils.h +++ b/internal/ceres/numeric_diff_test_utils.h
@@ -1,3 +1,36 @@ +// Ceres Solver - A fast non-linear least squares minimizer +// Copyright 2013 Google Inc. All rights reserved. +// http://code.google.com/p/ceres-solver/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// * Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// * Neither the name of Google Inc. nor the names of its contributors may be +// used to endorse or promote products derived from this software without +// specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +// POSSIBILITY OF SUCH DAMAGE. +// +// Author: sameeragarwal@google.com (Sameer Agarwal) + +#ifndef CERES_INTERNAL_NUMERIC_DIFF_TEST_UTILS_H_ +#define CERES_INTERNAL_NUMERIC_DIFF_TEST_UTILS_H_ + #include "ceres/cost_function.h" #include "ceres/sized_cost_function.h" #include "ceres/types.h" @@ -54,3 +87,5 @@ } // namespace internal } // namespace ceres + +#endif // CERES_INTERNAL_NUMERIC_DIFF_TEST_UTILS_H_
diff --git a/internal/ceres/polynomial_test.cc b/internal/ceres/polynomial_test.cc index 7c992fa..3339973 100644 --- a/internal/ceres/polynomial_test.cc +++ b/internal/ceres/polynomial_test.cc
@@ -342,8 +342,8 @@ TEST(Polynomial, QuadraticInterpolatingPolynomial) { // p(x) = 2x^2 + 3x + 2 - Vector true_polynomial(3); - true_polynomial << 2.0, 3.0, 2.0; + Vector true_polynomial(3); + true_polynomial << 2.0, 3.0, 2.0; vector<FunctionSample> samples; { @@ -401,10 +401,10 @@ TEST(Polynomial, CubicInterpolatingPolynomialFromValues) { // p(x) = x^3 + 2x^2 + 3x + 2 - Vector true_polynomial(4); - true_polynomial << 1.0, 2.0, 3.0, 2.0; + Vector true_polynomial(4); + true_polynomial << 1.0, 2.0, 3.0, 2.0; - vector<FunctionSample> samples; + vector<FunctionSample> samples; { FunctionSample sample; sample.x = 1.0; @@ -443,11 +443,11 @@ TEST(Polynomial, CubicInterpolatingPolynomialFromValuesAndOneGradient) { // p(x) = x^3 + 2x^2 + 3x + 2 - Vector true_polynomial(4); - true_polynomial << 1.0, 2.0, 3.0, 2.0; - Vector true_gradient_polynomial = DifferentiatePolynomial(true_polynomial); + Vector true_polynomial(4); + true_polynomial << 1.0, 2.0, 3.0, 2.0; + Vector true_gradient_polynomial = DifferentiatePolynomial(true_polynomial); - vector<FunctionSample> samples; + vector<FunctionSample> samples; { FunctionSample sample; sample.x = 1.0; @@ -480,11 +480,11 @@ TEST(Polynomial, CubicInterpolatingPolynomialFromValuesAndGradients) { // p(x) = x^3 + 2x^2 + 3x + 2 - Vector true_polynomial(4); - true_polynomial << 1.0, 2.0, 3.0, 2.0; - Vector true_gradient_polynomial = DifferentiatePolynomial(true_polynomial); + Vector true_polynomial(4); + true_polynomial << 1.0, 2.0, 3.0, 2.0; + Vector true_gradient_polynomial = DifferentiatePolynomial(true_polynomial); - vector<FunctionSample> samples; + vector<FunctionSample> samples; { FunctionSample sample; sample.x = -3.0;
diff --git a/internal/ceres/problem.cc b/internal/ceres/problem.cc index c8f4a21..43e7883 100644 --- a/internal/ceres/problem.cc +++ b/internal/ceres/problem.cc
@@ -32,6 +32,7 @@ #include "ceres/problem.h" #include <vector> +#include "ceres/crs_matrix.h" #include "ceres/problem_impl.h" namespace ceres { @@ -139,9 +140,10 @@ LossFunction* loss_function, double* x0, double* x1, double* x2, double* x3, double* x4, double* x5, double* x6, double* x7, double* x8, double* x9) { - return problem_impl_->AddResidualBlock(cost_function, - loss_function, - x0, x1, x2, x3, x4, x5, x6, x7, x8, x9); + return problem_impl_->AddResidualBlock( + cost_function, + loss_function, + x0, x1, x2, x3, x4, x5, x6, x7, x8, x9); } void Problem::AddParameterBlock(double* values, int size) { @@ -176,6 +178,18 @@ problem_impl_->SetParameterization(values, local_parameterization); } +bool Problem::Evaluate(const EvaluateOptions& evaluate_options, + double* cost, + vector<double>* residuals, + vector<double>* gradient, + CRSMatrix* jacobian) { + return problem_impl_->Evaluate(evaluate_options, + cost, + residuals, + gradient, + jacobian); +} + int Problem::NumParameterBlocks() const { return problem_impl_->NumParameterBlocks(); }
diff --git a/internal/ceres/problem_impl.cc b/internal/ceres/problem_impl.cc index ae4e7ed..bc378aa 100644 --- a/internal/ceres/problem_impl.cc +++ b/internal/ceres/problem_impl.cc
@@ -37,7 +37,11 @@ #include <string> #include <utility> #include <vector> +#include "ceres/casts.h" +#include "ceres/compressed_row_sparse_matrix.h" #include "ceres/cost_function.h" +#include "ceres/crs_matrix.h" +#include "ceres/evaluator.h" #include "ceres/loss_function.h" #include "ceres/map_util.h" #include "ceres/parameter_block.h" @@ -224,7 +228,7 @@ if (duplicate_items != sorted_parameter_blocks.end()) { string blocks; for (int i = 0; i < parameter_blocks.size(); ++i) { - blocks += internal::StringPrintf(" %p ", parameter_blocks[i]); + blocks += StringPrintf(" %p ", parameter_blocks[i]); } LOG(FATAL) << "Duplicate parameter blocks in a residual parameter " @@ -513,6 +517,164 @@ ->SetParameterization(local_parameterization); } +bool ProblemImpl::Evaluate(const Problem::EvaluateOptions& evaluate_options, + double* cost, + vector<double>* residuals, + vector<double>* gradient, + CRSMatrix* jacobian) { + if (cost == NULL && + residuals == NULL && + gradient == NULL && + jacobian == NULL) { + LOG(INFO) << "Nothing to do."; + return true; + } + + // If the user supplied residual blocks, then use them, otherwise + // take the residual blocks from the underlying program. + Program program; + *program.mutable_residual_blocks() = + ((evaluate_options.residual_blocks.size() > 0) + ? evaluate_options.residual_blocks : program_->residual_blocks()); + + const vector<double*>& parameter_block_ptrs = + evaluate_options.parameter_blocks; + + vector<ParameterBlock*> variable_parameter_blocks; + vector<ParameterBlock*>& parameter_blocks = + *program.mutable_parameter_blocks(); + + if (parameter_block_ptrs.size() == 0) { + // The user did not provide any parameter blocks, so default to + // using all the parameter blocks in the order that they are in + // the underlying program object. + parameter_blocks = program_->parameter_blocks(); + } else { + // The user supplied a vector of parameter blocks. Using this list + // requires a number of steps. + + // 1. Convert double* into ParameterBlock* + parameter_blocks.resize(parameter_block_ptrs.size()); + for (int i = 0; i < parameter_block_ptrs.size(); ++i) { + parameter_blocks[i] = + FindOrDie(parameter_block_map_, parameter_block_ptrs[i]); + } + + // 2. The user may have only supplied a subset of parameter + // blocks, so identify the ones that are not supplied by the user + // and are NOT constant. These parameter blocks are stored in + // variable_parameter_blocks. + // + // To ensure that the parameter blocks are not included in the + // columns of the jacobian, we need to make sure that they are + // constant during evaluation and then make them variable again + // after we are done. 
+ vector<ParameterBlock*> all_parameter_blocks(program_->parameter_blocks()); + vector<ParameterBlock*> included_parameter_blocks( + program.parameter_blocks()); + + vector<ParameterBlock*> excluded_parameter_blocks; + sort(all_parameter_blocks.begin(), all_parameter_blocks.end()); + sort(included_parameter_blocks.begin(), included_parameter_blocks.end()); + set_difference(all_parameter_blocks.begin(), + all_parameter_blocks.end(), + included_parameter_blocks.begin(), + included_parameter_blocks.end(), + back_inserter(excluded_parameter_blocks)); + + variable_parameter_blocks.reserve(excluded_parameter_blocks.size()); + for (int i = 0; i < excluded_parameter_blocks.size(); ++i) { + ParameterBlock* parameter_block = excluded_parameter_blocks[i]; + if (!parameter_block->IsConstant()) { + variable_parameter_blocks.push_back(parameter_block); + parameter_block->SetConstant(); + } + } + } + + // Setup the Parameter indices and offsets before an evaluator can + // be constructed and used. + program.SetParameterOffsetsAndIndex(); + + Evaluator::Options evaluator_options; + + // Even though using SPARSE_NORMAL_CHOLESKY requires SuiteSparse or + // CXSparse, here it just being used for telling the evaluator to + // use a SparseRowCompressedMatrix for the jacobian. This is because + // the Evaluator decides the storage for the Jacobian based on the + // type of linear solver being used. + evaluator_options.linear_solver_type = SPARSE_NORMAL_CHOLESKY; + evaluator_options.num_threads = evaluate_options.num_threads; + + string error; + scoped_ptr<Evaluator> evaluator( + Evaluator::Create(evaluator_options, &program, &error)); + if (evaluator.get() == NULL) { + LOG(ERROR) << "Unable to create an Evaluator object. " + << "Error: " << error + << "This is a Ceres bug; please contact the developers!"; + + // Make the parameter blocks that were temporarily marked + // constant, variable again. + for (int i = 0; i < variable_parameter_blocks.size(); ++i) { + variable_parameter_blocks[i]->SetVarying(); + } + return false; + } + + if (residuals !=NULL) { + residuals->resize(evaluator->NumResiduals()); + } + + if (gradient != NULL) { + gradient->resize(evaluator->NumEffectiveParameters()); + } + + scoped_ptr<CompressedRowSparseMatrix> tmp_jacobian; + if (jacobian != NULL) { + tmp_jacobian.reset( + down_cast<CompressedRowSparseMatrix*>(evaluator->CreateJacobian())); + } + + // Point the state pointers to the user state pointers. This is + // needed so that we can extract a parameter vector which is then + // passed to Evaluator::Evaluate. + program.SetParameterBlockStatePtrsToUserStatePtrs(); + + // Copy the value of the parameter blocks into a vector, since the + // Evaluate::Evaluate method needs its input as such. The previous + // call to SetParameterBlockStatePtrsToUserStatePtrs ensures that + // these values are the ones corresponding to the actual state of + // the parameter blocks, rather than the temporary state pointer + // used for evaluation. + Vector parameters(program.NumParameters()); + program.ParameterBlocksToStateVector(parameters.data()); + + double tmp_cost = 0; + bool status = evaluator->Evaluate(parameters.data(), + &tmp_cost, + residuals != NULL ? &(*residuals)[0] : NULL, + gradient != NULL ? &(*gradient)[0] : NULL, + tmp_jacobian.get()); + + // Make the parameter blocks that were temporarirly marked + // constant, variable again. 
+ for (int i = 0; i < variable_parameter_blocks.size(); ++i) { + variable_parameter_blocks[i]->SetVarying(); + } + + if (status) { + if (cost != NULL) { + *cost = tmp_cost; + } + if (jacobian != NULL) { + tmp_jacobian->ToCRSMatrix(jacobian); + } + } + + return status; +} + int ProblemImpl::NumParameterBlocks() const { return program_->NumParameterBlocks(); }
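As the tests added further down exercise, a caller can also restrict the evaluation to a subset of the problem via EvaluateOptions; parameter blocks left out of the list are temporarily marked constant and restored afterwards, which is what the implementation above does. A hedged sketch (the problem, the parameter block pointers x and z, and the residual block id f_id are hypothetical):

    ceres::Problem::EvaluateOptions options;
    // Columns of the jacobian are restricted to (and ordered as) x, z.
    options.parameter_blocks.push_back(x);
    options.parameter_blocks.push_back(z);
    // Rows are restricted to the single residual block f.
    options.residual_blocks.push_back(f_id);

    double cost = 0.0;
    ceres::CRSMatrix jacobian;
    problem.Evaluate(options, &cost, NULL, NULL, &jacobian);
    // Parameter blocks not listed above do not appear as jacobian columns;
    // they are held constant for this call and made variable again after it.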
diff --git a/internal/ceres/problem_impl.h b/internal/ceres/problem_impl.h index 536e73a..ccc315d 100644 --- a/internal/ceres/problem_impl.h +++ b/internal/ceres/problem_impl.h
@@ -53,6 +53,7 @@ class CostFunction; class LossFunction; class LocalParameterization; +struct CRSMatrix; namespace internal { @@ -126,6 +127,13 @@ void SetParameterBlockVariable(double* values); void SetParameterization(double* values, LocalParameterization* local_parameterization); + + bool Evaluate(const Problem::EvaluateOptions& options, + double* cost, + vector<double>* residuals, + vector<double>* gradient, + CRSMatrix* jacobian); + int NumParameterBlocks() const; int NumParameters() const; int NumResidualBlocks() const; @@ -139,6 +147,12 @@ private: ParameterBlock* InternalAddParameterBlock(double* values, int size); + bool InternalEvaluate(Program* program, + double* cost, + vector<double>* residuals, + vector<double>* gradient, + CRSMatrix* jacobian); + // Delete the arguments in question. These differ from the Remove* functions // in that they do not clean up references to the block to delete; they // merely delete them.
diff --git a/internal/ceres/problem_test.cc b/internal/ceres/problem_test.cc index 55f355b..130148d 100644 --- a/internal/ceres/problem_test.cc +++ b/internal/ceres/problem_test.cc
@@ -32,14 +32,20 @@ #include "ceres/problem.h" #include "ceres/problem_impl.h" -#include "gtest/gtest.h" +#include "ceres/casts.h" #include "ceres/cost_function.h" +#include "ceres/crs_matrix.h" +#include "ceres/internal/eigen.h" +#include "ceres/internal/scoped_ptr.h" #include "ceres/local_parameterization.h" #include "ceres/map_util.h" #include "ceres/parameter_block.h" #include "ceres/program.h" #include "ceres/sized_cost_function.h" -#include "ceres/internal/scoped_ptr.h" +#include "ceres/sparse_matrix.h" +#include "ceres/types.h" +#include "gtest/gtest.h" + namespace ceres { namespace internal { @@ -695,5 +701,541 @@ DynamicProblem, ::testing::Values(true, false)); +// Test for Problem::Evaluate + +// TODO(sameeragarwal): The following struct and function are shared +// with evaluator_test.cc. Once things settle down, do an +// evaluate_utils.h or some such thing to reduce code duplication. The +// best time is perhaps when we remove the support for +// Solver::Summary::initial_* +struct ExpectedEvaluation { + int num_rows; + int num_cols; + double cost; + const double residuals[50]; + const double gradient[50]; + const double jacobian[200]; +}; + +void CompareEvaluations(int expected_num_rows, + int expected_num_cols, + double expected_cost, + const double* expected_residuals, + const double* expected_gradient, + const double* expected_jacobian, + const double actual_cost, + const double* actual_residuals, + const double* actual_gradient, + const double* actual_jacobian) { + EXPECT_EQ(expected_cost, actual_cost); + + if (expected_residuals != NULL) { + ConstVectorRef expected_residuals_vector(expected_residuals, + expected_num_rows); + ConstVectorRef actual_residuals_vector(actual_residuals, + expected_num_rows); + EXPECT_TRUE((actual_residuals_vector.array() == + expected_residuals_vector.array()).all()) + << "Actual:\n" << actual_residuals_vector + << "\nExpected:\n" << expected_residuals_vector; + } + + if (expected_gradient != NULL) { + ConstVectorRef expected_gradient_vector(expected_gradient, + expected_num_cols); + ConstVectorRef actual_gradient_vector(actual_gradient, + expected_num_cols); + + EXPECT_TRUE((actual_gradient_vector.array() == + expected_gradient_vector.array()).all()) + << "Actual:\n" << actual_gradient_vector.transpose() + << "\nExpected:\n" << expected_gradient_vector.transpose(); + } + + if (expected_jacobian != NULL) { + ConstMatrixRef expected_jacobian_matrix(expected_jacobian, + expected_num_rows, + expected_num_cols); + ConstMatrixRef actual_jacobian_matrix(actual_jacobian, + expected_num_rows, + expected_num_cols); + EXPECT_TRUE((actual_jacobian_matrix.array() == + expected_jacobian_matrix.array()).all()) + << "Actual:\n" << actual_jacobian_matrix + << "\nExpected:\n" << expected_jacobian_matrix; + } +} + +// Simple cost function used for testing Problem::Evaluate. 
+// +// r_i = i - (j + 1) * x_ij^2 +template <int kNumResiduals, int kNumParameterBlocks > +class QuadraticCostFunction : public CostFunction { + public: + QuadraticCostFunction() { + CHECK_GT(kNumResiduals, 0); + CHECK_GT(kNumParameterBlocks, 0); + set_num_residuals(kNumResiduals); + for (int i = 0; i < kNumParameterBlocks; ++i) { + mutable_parameter_block_sizes()->push_back(kNumResiduals); + } + } + + virtual bool Evaluate(double const* const* parameters, + double* residuals, + double** jacobians) const { + for (int i = 0; i < kNumResiduals; ++i) { + residuals[i] = i; + for (int j = 0; j < kNumParameterBlocks; ++j) { + residuals[i] -= (j + 1.0) * parameters[j][i] * parameters[j][i]; + } + } + + if (jacobians == NULL) { + return true; + } + + for (int j = 0; j < kNumParameterBlocks; ++j) { + if (jacobians[j] != NULL) { + MatrixRef(jacobians[j], kNumResiduals, kNumResiduals) = + (-2.0 * (j + 1.0) * + ConstVectorRef(parameters[j], kNumResiduals)).asDiagonal(); + } + } + + return true; + } +}; + +// Convert a CRSMatrix to a dense Eigen matrix. +void CRSToDenseMatrix(const CRSMatrix& input, Matrix* output) { + Matrix& m = *CHECK_NOTNULL(output); + m.resize(input.num_rows, input.num_cols); + m.setZero(); + for (int row = 0; row < input.num_rows; ++row) { + for (int j = input.rows[row]; j < input.rows[row + 1]; ++j) { + const int col = input.cols[j]; + m(row, col) = input.values[j]; + } + } +} + +class ProblemEvaluateTest : public ::testing::Test { + protected: + void SetUp() { + for (int i = 0; i < 6; ++i) { + parameters_[i] = static_cast<double>(i + 1); + } + + parameter_blocks_.push_back(parameters_); + parameter_blocks_.push_back(parameters_ + 2); + parameter_blocks_.push_back(parameters_ + 4); + + + CostFunction* cost_function = new QuadraticCostFunction<2, 2>; + + // f(x, y) + residual_blocks_.push_back( + problem_.AddResidualBlock(cost_function, + NULL, + parameters_, + parameters_ + 2)); + // g(y, z) + residual_blocks_.push_back( + problem_.AddResidualBlock(cost_function, + NULL, parameters_ + 2, + parameters_ + 4)); + // h(z, x) + residual_blocks_.push_back( + problem_.AddResidualBlock(cost_function, + NULL, + parameters_ + 4, + parameters_)); + } + + + + void EvaluateAndCompare(const Problem::EvaluateOptions& options, + const int expected_num_rows, + const int expected_num_cols, + const double expected_cost, + const double* expected_residuals, + const double* expected_gradient, + const double* expected_jacobian) { + double cost; + vector<double> residuals; + vector<double> gradient; + CRSMatrix jacobian; + + EXPECT_TRUE( + problem_.Evaluate(options, + &cost, + expected_residuals != NULL ? &residuals : NULL, + expected_gradient != NULL ? &gradient : NULL, + expected_jacobian != NULL ? &jacobian : NULL)); + + if (expected_residuals != NULL) { + EXPECT_EQ(residuals.size(), expected_num_rows); + } + + if (expected_gradient != NULL) { + EXPECT_EQ(gradient.size(), expected_num_cols); + } + + if (expected_jacobian != NULL) { + EXPECT_EQ(jacobian.num_rows, expected_num_rows); + EXPECT_EQ(jacobian.num_cols, expected_num_cols); + } + + Matrix dense_jacobian; + if (expected_jacobian != NULL) { + CRSToDenseMatrix(jacobian, &dense_jacobian); + } + + CompareEvaluations(expected_num_rows, + expected_num_cols, + expected_cost, + expected_residuals, + expected_gradient, + expected_jacobian, + cost, + residuals.size() > 0 ? &residuals[0] : NULL, + gradient.size() > 0 ? 
&gradient[0] : NULL, + dense_jacobian.data()); + } + + void CheckAllEvaluationCombinations(const Problem::EvaluateOptions& options, + const ExpectedEvaluation& expected) { + for (int i = 0; i < 8; ++i) { + EvaluateAndCompare(options, + expected.num_rows, + expected.num_cols, + expected.cost, + (i & 1) ? expected.residuals : NULL, + (i & 2) ? expected.gradient : NULL, + (i & 4) ? expected.jacobian : NULL); + } + } + + ProblemImpl problem_; + double parameters_[6]; + vector<double*> parameter_blocks_; + vector<ResidualBlockId> residual_blocks_; +}; + + +TEST_F(ProblemEvaluateTest, MultipleParameterAndResidualBlocks) { + ExpectedEvaluation expected = { + // Rows/columns + 6, 6, + // Cost + 7607.0, + // Residuals + { -19.0, -35.0, // f + -59.0, -87.0, // g + -27.0, -43.0 // h + }, + // Gradient + { 146.0, 484.0, // x + 582.0, 1256.0, // y + 1450.0, 2604.0, // z + }, + // Jacobian + // x y z + { /* f(x, y) */ -2.0, 0.0, -12.0, 0.0, 0.0, 0.0, + 0.0, -4.0, 0.0, -16.0, 0.0, 0.0, + /* g(y, z) */ 0.0, 0.0, -6.0, 0.0, -20.0, 0.0, + 0.0, 0.0, 0.0, -8.0, 0.0, -24.0, + /* h(z, x) */ -4.0, 0.0, 0.0, 0.0, -10.0, 0.0, + 0.0, -8.0, 0.0, 0.0, 0.0, -12.0 + } + }; + + CheckAllEvaluationCombinations(Problem::EvaluateOptions(), expected); +} + +TEST_F(ProblemEvaluateTest, ParameterAndResidualBlocksPassedInOptions) { + ExpectedEvaluation expected = { + // Rows/columns + 6, 6, + // Cost + 7607.0, + // Residuals + { -19.0, -35.0, // f + -59.0, -87.0, // g + -27.0, -43.0 // h + }, + // Gradient + { 146.0, 484.0, // x + 582.0, 1256.0, // y + 1450.0, 2604.0, // z + }, + // Jacobian + // x y z + { /* f(x, y) */ -2.0, 0.0, -12.0, 0.0, 0.0, 0.0, + 0.0, -4.0, 0.0, -16.0, 0.0, 0.0, + /* g(y, z) */ 0.0, 0.0, -6.0, 0.0, -20.0, 0.0, + 0.0, 0.0, 0.0, -8.0, 0.0, -24.0, + /* h(z, x) */ -4.0, 0.0, 0.0, 0.0, -10.0, 0.0, + 0.0, -8.0, 0.0, 0.0, 0.0, -12.0 + } + }; + + Problem::EvaluateOptions evaluate_options; + evaluate_options.parameter_blocks = parameter_blocks_; + evaluate_options.residual_blocks = residual_blocks_; + CheckAllEvaluationCombinations(evaluate_options, expected); +} + +TEST_F(ProblemEvaluateTest, ReorderedResidualBlocks) { + ExpectedEvaluation expected = { + // Rows/columns + 6, 6, + // Cost + 7607.0, + // Residuals + { -19.0, -35.0, // f + -27.0, -43.0, // h + -59.0, -87.0 // g + }, + // Gradient + { 146.0, 484.0, // x + 582.0, 1256.0, // y + 1450.0, 2604.0, // z + }, + // Jacobian + // x y z + { /* f(x, y) */ -2.0, 0.0, -12.0, 0.0, 0.0, 0.0, + 0.0, -4.0, 0.0, -16.0, 0.0, 0.0, + /* h(z, x) */ -4.0, 0.0, 0.0, 0.0, -10.0, 0.0, + 0.0, -8.0, 0.0, 0.0, 0.0, -12.0, + /* g(y, z) */ 0.0, 0.0, -6.0, 0.0, -20.0, 0.0, + 0.0, 0.0, 0.0, -8.0, 0.0, -24.0 + } + }; + + Problem::EvaluateOptions evaluate_options; + evaluate_options.parameter_blocks = parameter_blocks_; + + // f, h, g + evaluate_options.residual_blocks.push_back(residual_blocks_[0]); + evaluate_options.residual_blocks.push_back(residual_blocks_[2]); + evaluate_options.residual_blocks.push_back(residual_blocks_[1]); + + CheckAllEvaluationCombinations(evaluate_options, expected); +} + +TEST_F(ProblemEvaluateTest, ReorderedResidualBlocksAndReorderedParameterBlocks) { + ExpectedEvaluation expected = { + // Rows/columns + 6, 6, + // Cost + 7607.0, + // Residuals + { -19.0, -35.0, // f + -27.0, -43.0, // h + -59.0, -87.0 // g + }, + // Gradient + { 1450.0, 2604.0, // z + 582.0, 1256.0, // y + 146.0, 484.0, // x + }, + // Jacobian + // z y x + { /* f(x, y) */ 0.0, 0.0, -12.0, 0.0, -2.0, 0.0, + 0.0, 0.0, 0.0, -16.0, 0.0, -4.0, + /* h(z, x) */ -10.0, 0.0, 0.0, 0.0, -4.0, 
0.0, + 0.0, -12.0, 0.0, 0.0, 0.0, -8.0, + /* g(y, z) */ -20.0, 0.0, -6.0, 0.0, 0.0, 0.0, + 0.0, -24.0, 0.0, -8.0, 0.0, 0.0 + } + }; + + Problem::EvaluateOptions evaluate_options; + // z, y, x + evaluate_options.parameter_blocks.push_back(parameter_blocks_[2]); + evaluate_options.parameter_blocks.push_back(parameter_blocks_[1]); + evaluate_options.parameter_blocks.push_back(parameter_blocks_[0]); + + // f, h, g + evaluate_options.residual_blocks.push_back(residual_blocks_[0]); + evaluate_options.residual_blocks.push_back(residual_blocks_[2]); + evaluate_options.residual_blocks.push_back(residual_blocks_[1]); + + CheckAllEvaluationCombinations(evaluate_options, expected); +} + +TEST_F(ProblemEvaluateTest, ConstantParameterBlock) { + ExpectedEvaluation expected = { + // Rows/columns + 6, 6, + // Cost + 7607.0, + // Residuals + { -19.0, -35.0, // f + -59.0, -87.0, // g + -27.0, -43.0 // h + }, + + // Gradient + { 146.0, 484.0, // x + 0.0, 0.0, // y + 1450.0, 2604.0, // z + }, + + // Jacobian + // x y z + { /* f(x, y) */ -2.0, 0.0, 0.0, 0.0, 0.0, 0.0, + 0.0, -4.0, 0.0, 0.0, 0.0, 0.0, + /* g(y, z) */ 0.0, 0.0, 0.0, 0.0, -20.0, 0.0, + 0.0, 0.0, 0.0, 0.0, 0.0, -24.0, + /* h(z, x) */ -4.0, 0.0, 0.0, 0.0, -10.0, 0.0, + 0.0, -8.0, 0.0, 0.0, 0.0, -12.0 + } + }; + + problem_.SetParameterBlockConstant(parameters_ + 2); + CheckAllEvaluationCombinations(Problem::EvaluateOptions(), expected); +} + +TEST_F(ProblemEvaluateTest, ExcludedAResidualBlock) { + ExpectedEvaluation expected = { + // Rows/columns + 4, 6, + // Cost + 2082.0, + // Residuals + { -19.0, -35.0, // f + -27.0, -43.0 // h + }, + // Gradient + { 146.0, 484.0, // x + 228.0, 560.0, // y + 270.0, 516.0, // z + }, + // Jacobian + // x y z + { /* f(x, y) */ -2.0, 0.0, -12.0, 0.0, 0.0, 0.0, + 0.0, -4.0, 0.0, -16.0, 0.0, 0.0, + /* h(z, x) */ -4.0, 0.0, 0.0, 0.0, -10.0, 0.0, + 0.0, -8.0, 0.0, 0.0, 0.0, -12.0 + } + }; + + Problem::EvaluateOptions evaluate_options; + evaluate_options.residual_blocks.push_back(residual_blocks_[0]); + evaluate_options.residual_blocks.push_back(residual_blocks_[2]); + + CheckAllEvaluationCombinations(evaluate_options, expected); +} + +TEST_F(ProblemEvaluateTest, ExcludedParameterBlock) { + ExpectedEvaluation expected = { + // Rows/columns + 6, 4, + // Cost + 7607.0, + // Residuals + { -19.0, -35.0, // f + -59.0, -87.0, // g + -27.0, -43.0 // h + }, + + // Gradient + { 146.0, 484.0, // x + 1450.0, 2604.0, // z + }, + + // Jacobian + // x z + { /* f(x, y) */ -2.0, 0.0, 0.0, 0.0, + 0.0, -4.0, 0.0, 0.0, + /* g(y, z) */ 0.0, 0.0, -20.0, 0.0, + 0.0, 0.0, 0.0, -24.0, + /* h(z, x) */ -4.0, 0.0, -10.0, 0.0, + 0.0, -8.0, 0.0, -12.0 + } + }; + + Problem::EvaluateOptions evaluate_options; + // x, z + evaluate_options.parameter_blocks.push_back(parameter_blocks_[0]); + evaluate_options.parameter_blocks.push_back(parameter_blocks_[2]); + evaluate_options.residual_blocks = residual_blocks_; + CheckAllEvaluationCombinations(evaluate_options, expected); +} + +TEST_F(ProblemEvaluateTest, ExcludedParameterBlockAndExcludedResidualBlock) { + ExpectedEvaluation expected = { + // Rows/columns + 4, 4, + // Cost + 6318.0, + // Residuals + { -19.0, -35.0, // f + -59.0, -87.0, // g + }, + + // Gradient + { 38.0, 140.0, // x + 1180.0, 2088.0, // z + }, + + // Jacobian + // x z + { /* f(x, y) */ -2.0, 0.0, 0.0, 0.0, + 0.0, -4.0, 0.0, 0.0, + /* g(y, z) */ 0.0, 0.0, -20.0, 0.0, + 0.0, 0.0, 0.0, -24.0, + } + }; + + Problem::EvaluateOptions evaluate_options; + // x, z + evaluate_options.parameter_blocks.push_back(parameter_blocks_[0]); + 
evaluate_options.parameter_blocks.push_back(parameter_blocks_[2]); + evaluate_options.residual_blocks.push_back(residual_blocks_[0]); + evaluate_options.residual_blocks.push_back(residual_blocks_[1]); + + CheckAllEvaluationCombinations(evaluate_options, expected); +} + +TEST_F(ProblemEvaluateTest, LocalParameterization) { + ExpectedEvaluation expected = { + // Rows/columns + 6, 5, + // Cost + 7607.0, + // Residuals + { -19.0, -35.0, // f + -59.0, -87.0, // g + -27.0, -43.0 // h + }, + // Gradient + { 146.0, 484.0, // x + 1256.0, // y with SubsetParameterization + 1450.0, 2604.0, // z + }, + // Jacobian + // x y z + { /* f(x, y) */ -2.0, 0.0, 0.0, 0.0, 0.0, + 0.0, -4.0, -16.0, 0.0, 0.0, + /* g(y, z) */ 0.0, 0.0, 0.0, -20.0, 0.0, + 0.0, 0.0, -8.0, 0.0, -24.0, + /* h(z, x) */ -4.0, 0.0, 0.0, -10.0, 0.0, + 0.0, -8.0, 0.0, 0.0, -12.0 + } + }; + + vector<int> constant_parameters; + constant_parameters.push_back(0); + problem_.SetParameterization(parameters_ + 2, + new SubsetParameterization(2, + constant_parameters)); + + CheckAllEvaluationCombinations(Problem::EvaluateOptions(), expected); +} + } // namespace internal } // namespace ceres
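For reference, a minimal sketch of the user-facing call pattern exercised by the ProblemEvaluateTest cases above; the names `problem`, `x`, `z`, and `id_f` are placeholders for illustration and do not appear in this patch:

    double cost = 0.0;
    std::vector<double> residuals;
    std::vector<double> gradient;
    ceres::CRSMatrix jacobian;

    // Default options: every parameter and residual block, in the order
    // they were added to the problem. Any of the output pointers may be
    // NULL if that quantity is not needed.
    problem.Evaluate(ceres::Problem::EvaluateOptions(),
                     &cost, &residuals, &gradient, &jacobian);

    // Restrict and reorder the evaluation, as the reordering/exclusion
    // tests above do.
    ceres::Problem::EvaluateOptions evaluate_options;
    evaluate_options.parameter_blocks.push_back(z);    // z before x
    evaluate_options.parameter_blocks.push_back(x);
    evaluate_options.residual_blocks.push_back(id_f);  // only f(x, y)
    problem.Evaluate(evaluate_options, &cost, &residuals, NULL, NULL);

Constant parameter blocks and local parameterizations are respected, as the ConstantParameterBlock and LocalParameterization tests check: held-constant blocks contribute zero gradient and zero jacobian columns, and a SubsetParameterization shrinks the corresponding column block.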
diff --git a/internal/ceres/program_evaluator.h b/internal/ceres/program_evaluator.h index e9640ab..a19cdf8 100644 --- a/internal/ceres/program_evaluator.h +++ b/internal/ceres/program_evaluator.h
@@ -83,12 +83,14 @@ #include <omp.h> #endif +#include <map> +#include <vector> +#include "ceres/execution_summary.h" +#include "ceres/internal/eigen.h" +#include "ceres/internal/scoped_ptr.h" #include "ceres/parameter_block.h" #include "ceres/program.h" #include "ceres/residual_block.h" -#include "ceres/internal/eigen.h" -#include "ceres/internal/scoped_ptr.h" -#include "ceres/execution_summary.h" namespace ceres { namespace internal {
diff --git a/internal/ceres/rotation_test.cc b/internal/ceres/rotation_test.cc index e18a14b..8de1bbd 100644 --- a/internal/ceres/rotation_test.cc +++ b/internal/ceres/rotation_test.cc
@@ -967,7 +967,8 @@ TEST(MatrixAdapter, RowMajor3x3ReturnTypeAndAccessIsCorrect) { double array[9] = { 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0 }; - const float const_array[9] = { 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f }; + const float const_array[9] = + { 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f }; MatrixAdapter<double, 3, 1> A = RowMajorAdapter3x3(array); MatrixAdapter<const float, 3, 1> B = RowMajorAdapter3x3(const_array); @@ -975,24 +976,25 @@ for (int j = 0; j < 3; ++j) { // The values are integers from 1 to 9, so equality tests are appropriate // even for float and double values. - EXPECT_EQ(A(i,j), array[3*i+j]); - EXPECT_EQ(B(i,j), const_array[3*i+j]); + EXPECT_EQ(A(i, j), array[3*i+j]); + EXPECT_EQ(B(i, j), const_array[3*i+j]); } } } TEST(MatrixAdapter, ColumnMajor3x3ReturnTypeAndAccessIsCorrect) { double array[9] = { 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0 }; - const float const_array[9] = { 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f }; + const float const_array[9] = + { 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f }; MatrixAdapter<double, 1, 3> A = ColumnMajorAdapter3x3(array); MatrixAdapter<const float, 1, 3> B = ColumnMajorAdapter3x3(const_array); for (int i = 0; i < 3; ++i) { for (int j = 0; j < 3; ++j) { - // The values are integers from 1 to 9, so equality tests are appropriate - // even for float and double values. - EXPECT_EQ(A(i,j), array[3*j+i]); - EXPECT_EQ(B(i,j), const_array[3*j+i]); + // The values are integers from 1 to 9, so equality tests are + // appropriate even for float and double values. + EXPECT_EQ(A(i, j), array[3*j+i]); + EXPECT_EQ(B(i, j), const_array[3*j+i]); } } }
diff --git a/internal/ceres/schur_complement_solver.h b/internal/ceres/schur_complement_solver.h index c382f95..7e98f31 100644 --- a/internal/ceres/schur_complement_solver.h +++ b/internal/ceres/schur_complement_solver.h
@@ -33,6 +33,7 @@ #include <set> #include <utility> +#include <vector> #include "ceres/block_random_access_matrix.h" #include "ceres/block_sparse_matrix.h"
diff --git a/internal/ceres/schur_eliminator_impl.h b/internal/ceres/schur_eliminator_impl.h index 6120db9..339c44b 100644 --- a/internal/ceres/schur_eliminator_impl.h +++ b/internal/ceres/schur_eliminator_impl.h
@@ -50,7 +50,6 @@ #include <algorithm> #include <map> -#include <glog/logging.h> #include "Eigen/Dense" #include "ceres/block_random_access_matrix.h" #include "ceres/block_sparse_matrix.h" @@ -60,6 +59,7 @@ #include "ceres/stl_util.h" #include "ceres/internal/eigen.h" #include "ceres/internal/scoped_ptr.h" +#include "glog/logging.h" namespace ceres { namespace internal {
diff --git a/internal/ceres/solver.cc b/internal/ceres/solver.cc index b1bd2d8..6436d2d 100644 --- a/internal/ceres/solver.cc +++ b/internal/ceres/solver.cc
@@ -149,8 +149,6 @@ using internal::StringAppendF; string Solver::Summary::FullReport() const { - - string report = "\n" "Ceres Solver Report\n" @@ -181,7 +179,8 @@ // TODO(sameeragarwal): Refactor this into separate functions. if (minimizer_type == TRUST_REGION) { - StringAppendF(&report, "\nMinimizer %19s\n", "TRUST_REGION"); + StringAppendF(&report, "\nMinimizer %19s\n", + "TRUST_REGION"); if (linear_solver_type_used == SPARSE_NORMAL_CHOLESKY || linear_solver_type_used == SPARSE_SCHUR || (linear_solver_type_used == ITERATIVE_SCHUR && @@ -261,7 +260,8 @@ StringAppendF(&report, "\nCost:\n"); StringAppendF(&report, "Initial % 30e\n", initial_cost); - if (termination_type != NUMERICAL_FAILURE && termination_type != USER_ABORT) { + if (termination_type != NUMERICAL_FAILURE && + termination_type != USER_ABORT) { StringAppendF(&report, "Final % 30e\n", final_cost); StringAppendF(&report, "Change % 30e\n", initial_cost - final_cost); @@ -304,7 +304,8 @@ max_lbfgs_rank); } else { StringAppendF(&report, "Line search direction %19s\n", - LineSearchDirectionTypeToString(line_search_direction_type)); + LineSearchDirectionTypeToString( + line_search_direction_type)); } StringAppendF(&report, "Line search type %19s\n", LineSearchTypeToString(line_search_type)); @@ -328,13 +329,15 @@ StringAppendF(&report, "\nCost:\n"); StringAppendF(&report, "Initial % 30e\n", initial_cost); - if (termination_type != NUMERICAL_FAILURE && termination_type != USER_ABORT) { + if (termination_type != NUMERICAL_FAILURE && + termination_type != USER_ABORT) { StringAppendF(&report, "Final % 30e\n", final_cost); StringAppendF(&report, "Change % 30e\n", initial_cost - final_cost); } - StringAppendF(&report, "\nNumber of iterations: % 20ld\n", iterations.size() - 1); + StringAppendF(&report, "\nNumber of iterations: % 20ld\n", + iterations.size() - 1); StringAppendF(&report, "\nTime (in seconds):\n"); StringAppendF(&report, "Preprocessor %25.3f\n",
diff --git a/internal/ceres/solver_impl.cc b/internal/ceres/solver_impl.cc index 803b711..7f85fbe 100644 --- a/internal/ceres/solver_impl.cc +++ b/internal/ceres/solver_impl.cc
@@ -77,20 +77,6 @@ double* parameters_; }; -// Macro used to evaluate initial and final residuals, gradient and -// jacobian if requested by the user. -// -// We need a macro here, instead of a simple function call, since the -// multiplexing happens on variable names. -#define CERES_EVALUATE(which) \ - Evaluator::Evaluate( \ - original_program, \ - options.num_threads, \ - &summary->which ## _cost, \ - options.return_ ## which ## _residuals ? &summary->which ## _residuals : NULL, \ - options.return_ ## which ## _gradient ? &summary->which ## _gradient : NULL, \ - options.return_ ## which ## _jacobian ? &summary->which ## _jacobian : NULL) - void SetSummaryFinalCost(Solver::Summary* summary) { summary->final_cost = summary->initial_cost; // We need the loop here, instead of just looking at the last @@ -240,7 +226,8 @@ file_logging_callback.get()); } - TrustRegionLoggingCallback logging_callback(options.minimizer_progress_to_stdout); + TrustRegionLoggingCallback logging_callback( + options.minimizer_progress_to_stdout); if (options.logging_type != SILENT) { minimizer_options.callbacks.insert(minimizer_options.callbacks.begin(), &logging_callback); @@ -299,7 +286,8 @@ file_logging_callback.get()); } - LineSearchLoggingCallback logging_callback(options.minimizer_progress_to_stdout); + LineSearchLoggingCallback logging_callback( + options.minimizer_progress_to_stdout); if (options.logging_type != SILENT) { minimizer_options.callbacks.insert(minimizer_options.callbacks.begin(), &logging_callback); @@ -400,21 +388,6 @@ event_logger.AddEvent("Init"); - // Evaluate the initial cost, residual vector and the jacobian - // matrix if requested by the user. - if (options.return_initial_residuals || - options.return_initial_gradient || - options.return_initial_jacobian) { - if (!CERES_EVALUATE(initial)) { - summary->termination_type = NUMERICAL_FAILURE; - summary->error = "Unable to evaluate the initial cost."; - LOG(ERROR) << summary->error; - return; - } - } - - event_logger.AddEvent("InitialEvaluate"); - original_program->SetParameterBlockStatePtrsToUserStatePtrs(); event_logger.AddEvent("SetParameterBlockPtrs"); @@ -463,6 +436,7 @@ problem_impl, &summary->fixed_cost, &summary->error)); + event_logger.AddEvent("CreateReducedProgram"); if (reduced_program == NULL) { return; @@ -480,33 +454,21 @@ summary->preprocessor_time_in_seconds = WallTimeInSeconds() - solver_start_time; + double post_process_start_time = WallTimeInSeconds(); LOG(INFO) << "Terminating: FUNCTION_TOLERANCE reached. " << "No non-constant parameter blocks found."; + summary->initial_cost = summary->fixed_cost; + summary->final_cost = summary->fixed_cost; + // FUNCTION_TOLERANCE is the right convergence here, as we know // that the objective function is constant and cannot be changed // any further. summary->termination_type = FUNCTION_TOLERANCE; - double post_process_start_time = WallTimeInSeconds(); - - // Evaluate the final cost, residual vector and the jacobian - // matrix if requested by the user. - if (options.return_final_residuals || - options.return_final_gradient || - options.return_final_jacobian) { - if (!CERES_EVALUATE(final)) { - summary->termination_type = NUMERICAL_FAILURE; - summary->error = "Unable to evaluate the final cost."; - LOG(ERROR) << summary->error; - return; - } - } - // Ensure the program state is set to the user parameters on the way out. 
original_program->SetParameterBlockStatePtrsToUserStatePtrs(); - event_logger.AddEvent("FinalEvaluate"); summary->postprocessor_time_in_seconds = WallTimeInSeconds() - post_process_start_time; return; @@ -612,42 +574,21 @@ double post_process_start_time = WallTimeInSeconds(); - // Push the contiguous optimized parameters back to the user's parameters. + // Push the contiguous optimized parameters back to the user's + // parameters. reduced_program->StateVectorToParameterBlocks(parameters.data()); reduced_program->CopyParameterBlockStateToUserState(); - // Evaluate the final cost, residual vector and the jacobian - // matrix if requested by the user. - if (options.return_final_residuals || - options.return_final_gradient || - options.return_final_jacobian) { - if (!CERES_EVALUATE(final)) { - // This failure requires careful handling. - // - // At this point, we have modified the user's state, but the - // evaluation failed and we inform him of NUMERICAL_FAILURE. Ceres - // guarantees that user's state is not modified if the solver - // returns with NUMERICAL_FAILURE. Thus, we need to restore the - // user's state to their original values. - reduced_program->StateVectorToParameterBlocks(original_parameters.data()); - reduced_program->CopyParameterBlockStateToUserState(); - - summary->termination_type = NUMERICAL_FAILURE; - summary->error = "Unable to evaluate the final cost."; - LOG(ERROR) << summary->error; - - event_logger.AddEvent("PostProcess"); - return; - } - } - - // Ensure the program state is set to the user parameters on the way out. + // Ensure the program state is set to the user parameters on the way + // out. original_program->SetParameterBlockStatePtrsToUserStatePtrs(); const map<string, double>& linear_solver_time_statistics = linear_solver->TimeStatistics(); summary->linear_solver_time_in_seconds = - FindWithDefault(linear_solver_time_statistics, "LinearSolver::Solve", 0.0); + FindWithDefault(linear_solver_time_statistics, + "LinearSolver::Solve", + 0.0); const map<string, double>& evaluator_time_statistics = evaluator->TimeStatistics(); @@ -675,7 +616,8 @@ *CHECK_NOTNULL(summary) = Solver::Summary(); summary->minimizer_type = LINE_SEARCH; - summary->line_search_direction_type = original_options.line_search_direction_type; + summary->line_search_direction_type = + original_options.line_search_direction_type; summary->max_lbfgs_rank = original_options.max_lbfgs_rank; summary->line_search_type = original_options.line_search_type; summary->num_parameter_blocks = problem_impl->NumParameterBlocks(); @@ -738,19 +680,6 @@ } } - // Evaluate the initial cost, residual vector and the jacobian - // matrix if requested by the user. - if (options.return_initial_residuals || - options.return_initial_gradient || - options.return_initial_jacobian) { - if (!CERES_EVALUATE(initial)) { - summary->termination_type = NUMERICAL_FAILURE; - summary->error = "Unable to evaluate the initial cost."; - LOG(ERROR) << summary->error; - return; - } - } - original_program->SetParameterBlockStatePtrsToUserStatePtrs(); // If the user requests gradient checking, construct a new @@ -802,19 +731,6 @@ SetSummaryFinalCost(summary); - // Evaluate the final cost, residual vector and the jacobian - // matrix if requested by the user. 
- if (options.return_final_residuals || - options.return_final_gradient || - options.return_final_jacobian) { - if (!CERES_EVALUATE(final)) { - summary->termination_type = NUMERICAL_FAILURE; - summary->error = "Unable to evaluate the final cost."; - LOG(ERROR) << summary->error; - return; - } - } - // Ensure the program state is set to the user parameters on the way out. original_program->SetParameterBlockStatePtrsToUserStatePtrs(); summary->postprocessor_time_in_seconds = @@ -865,33 +781,6 @@ SetSummaryFinalCost(summary); - // Evaluate the final cost, residual vector and the jacobian - // matrix if requested by the user. - if (options.return_final_residuals || - options.return_final_gradient || - options.return_final_jacobian) { - if (!CERES_EVALUATE(final)) { - // This failure requires careful handling. - // - // At this point, we have modified the user's state, but the - // evaluation failed and we inform him of NUMERICAL_FAILURE. Ceres - // guarantees that user's state is not modified if the solver - // returns with NUMERICAL_FAILURE. Thus, we need to restore the - // user's state to their original values. - - reduced_program->StateVectorToParameterBlocks(original_parameters.data()); - reduced_program->CopyParameterBlockStateToUserState(); - - summary->termination_type = NUMERICAL_FAILURE; - summary->error = "Unable to evaluate the final cost."; - LOG(ERROR) << summary->error; - - summary->postprocessor_time_in_seconds = - WallTimeInSeconds() - post_process_start_time; - return; - } - } - // Ensure the program state is set to the user parameters on the way out. original_program->SetParameterBlockStatePtrsToUserStatePtrs(); @@ -947,8 +836,9 @@ return true; } -bool SolverImpl::IsParameterBlockSetIndependent(const set<double*>& parameter_block_ptrs, - const vector<ResidualBlock*>& residual_blocks) { +bool SolverImpl::IsParameterBlockSetIndependent( + const set<double*>& parameter_block_ptrs, + const vector<ResidualBlock*>& residual_blocks) { // Loop over each residual block and ensure that no two parameter // blocks in the same residual block are part of // parameter_block_ptrs as that would violate the assumption that it @@ -1168,8 +1058,8 @@ event_logger.AddEvent("AlternateSolver"); - // Since the transformed program is the "active" program, and it is mutated, - // update the parameter offsets and indices. + // Since the transformed program is the "active" program, and it is + // mutated, update the parameter offsets and indices. transformed_program->SetParameterOffsetsAndIndex(); event_logger.AddEvent("SetOffsets"); @@ -1292,10 +1182,11 @@ return LinearSolver::Create(linear_solver_options); } -bool SolverImpl::ApplyUserOrdering(const ProblemImpl::ParameterMap& parameter_map, - const ParameterBlockOrdering* ordering, - Program* program, - string* error) { +bool SolverImpl::ApplyUserOrdering( + const ProblemImpl::ParameterMap& parameter_map, + const ParameterBlockOrdering* ordering, + Program* program, + string* error) { if (ordering->NumElements() != program->NumParameterBlocks()) { *error = StringPrintf("User specified ordering does not have the same " "number of parameters as the problem. The problem" @@ -1323,8 +1214,8 @@ parameter_map.find(*parameter_block_ptr_it); if (parameter_block_it == parameter_map.end()) { *error = StringPrintf("User specified ordering contains a pointer " - "to a double that is not a parameter block in the " - "problem. The invalid double is in group: %d", + "to a double that is not a parameter block in " + "the problem. 
The invalid double is in group: %d", group_it->first); return false; } @@ -1356,9 +1247,10 @@ // Reorder the residuals for program, if necessary, so that the residuals // involving each E block occur together. This is a necessary condition for the // Schur eliminator, which works on these "row blocks" in the jacobian. -bool SolverImpl::LexicographicallyOrderResidualBlocks(const int num_eliminate_blocks, - Program* program, - string* error) { +bool SolverImpl::LexicographicallyOrderResidualBlocks( + const int num_eliminate_blocks, + Program* program, + string* error) { CHECK_GE(num_eliminate_blocks, 1) << "Congratulations, you found a Ceres bug! Please report this error " << "to the developers."; @@ -1435,10 +1327,11 @@ return true; } -Evaluator* SolverImpl::CreateEvaluator(const Solver::Options& options, - const ProblemImpl::ParameterMap& parameter_map, - Program* program, - string* error) { +Evaluator* SolverImpl::CreateEvaluator( + const Solver::Options& options, + const ProblemImpl::ParameterMap& parameter_map, + Program* program, + string* error) { Evaluator::Options evaluator_options; evaluator_options.linear_solver_type = options.linear_solver_type; evaluator_options.num_eliminate_blocks = @@ -1479,7 +1372,7 @@ // Iterate over each group and verify that it is an independent // set. map<int, set<double*> >::const_iterator it = group_to_elements.begin(); - for ( ;it != group_to_elements.end(); ++it) { + for ( ; it != group_to_elements.end(); ++it) { if (!IsParameterBlockSetIndependent(it->second, program.residual_blocks())) { summary->error =
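With the Solver::Options::return_* members and the CERES_EVALUATE macro removed above, callers that still want residuals, gradient, or jacobian at the final parameter values evaluate the problem themselves after Solve(). A minimal sketch, assuming a populated ceres::Problem named `problem` (a placeholder, not an identifier from this patch):

    ceres::Solver::Options options;
    ceres::Solver::Summary summary;
    ceres::Solve(options, &problem, &summary);

    // Equivalent of the old return_final_residuals behaviour.
    std::vector<double> final_residuals;
    problem.Evaluate(ceres::Problem::EvaluateOptions(),
                     NULL, &final_residuals, NULL, NULL);

This is the same pattern the system_test.cc hunk further down switches to.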
diff --git a/internal/ceres/solver_impl.h b/internal/ceres/solver_impl.h index c9848fb..c5f5efa 100644 --- a/internal/ceres/solver_impl.h +++ b/internal/ceres/solver_impl.h
@@ -31,6 +31,7 @@ #ifndef CERES_INTERNAL_SOLVER_IMPL_H_ #define CERES_INTERNAL_SOLVER_IMPL_H_ +#include <set> #include <string> #include <vector> #include "ceres/internal/port.h"
diff --git a/internal/ceres/solver_impl_test.cc b/internal/ceres/solver_impl_test.cc index 5cba7d7..8e443ba 100644 --- a/internal/ceres/solver_impl_test.cc +++ b/internal/ceres/solver_impl_test.cc
@@ -240,8 +240,12 @@ double expected_fixed_cost; ResidualBlock *expected_removed_block = program.residual_blocks()[0]; - scoped_array<double> scratch(new double[expected_removed_block->NumScratchDoublesForEvaluate()]); - expected_removed_block->Evaluate(&expected_fixed_cost, NULL, NULL, scratch.get()); + scoped_array<double> scratch( + new double[expected_removed_block->NumScratchDoublesForEvaluate()]); + expected_removed_block->Evaluate(&expected_fixed_cost, + NULL, + NULL, + scratch.get()); string error; EXPECT_TRUE(SolverImpl::RemoveFixedBlocksFromProgram(&program, @@ -755,76 +759,6 @@ EXPECT_EQ(&w, problem.program().parameter_blocks()[3]->state()); } -#define CHECK_ARRAY(name, value) \ - if (options.return_ ## name) { \ - EXPECT_EQ(summary.name.size(), 1); \ - EXPECT_EQ(summary.name[0], value); \ - } else { \ - EXPECT_EQ(summary.name.size(), 0); \ - } - -#define CHECK_JACOBIAN(name) \ - if (options.return_ ## name) { \ - EXPECT_EQ(summary.name.num_rows, 1); \ - EXPECT_EQ(summary.name.num_cols, 1); \ - EXPECT_EQ(summary.name.cols.size(), 2); \ - EXPECT_EQ(summary.name.cols[0], 0); \ - EXPECT_EQ(summary.name.cols[1], 1); \ - EXPECT_EQ(summary.name.rows.size(), 1); \ - EXPECT_EQ(summary.name.rows[0], 0); \ - EXPECT_EQ(summary.name.values.size(), 0); \ - EXPECT_EQ(summary.name.values[0], name); \ - } else { \ - EXPECT_EQ(summary.name.num_rows, 0); \ - EXPECT_EQ(summary.name.num_cols, 0); \ - EXPECT_EQ(summary.name.cols.size(), 0); \ - EXPECT_EQ(summary.name.rows.size(), 0); \ - EXPECT_EQ(summary.name.values.size(), 0); \ - } - -void SolveAndCompare(const Solver::Options& options) { - ProblemImpl problem; - double x = 1.0; - - const double initial_residual = 5.0 - x; - const double initial_jacobian = -1.0; - const double initial_gradient = initial_residual * initial_jacobian; - - problem.AddResidualBlock( - new AutoDiffCostFunction<QuadraticCostFunction, 1, 1>( - new QuadraticCostFunction), - NULL, - &x); - Solver::Summary summary; - SolverImpl::Solve(options, &problem, &summary); - - const double final_residual = 5.0 - x; - const double final_jacobian = -1.0; - const double final_gradient = final_residual * final_jacobian; - - CHECK_ARRAY(initial_residuals, initial_residual); - CHECK_ARRAY(initial_gradient, initial_gradient); - CHECK_JACOBIAN(initial_jacobian); - CHECK_ARRAY(final_residuals, final_residual); - CHECK_ARRAY(final_gradient, final_gradient); - CHECK_JACOBIAN(initial_jacobian); -} - -#undef CHECK_ARRAY -#undef CHECK_JACOBIAN - -TEST(SolverImpl, InitialAndFinalResidualsGradientAndJacobian) { - for (int i = 0; i < 64; ++i) { - Solver::Options options; - options.return_initial_residuals = (i & 1); - options.return_initial_gradient = (i & 2); - options.return_initial_jacobian = (i & 4); - options.return_final_residuals = (i & 8); - options.return_final_gradient = (i & 16); - options.return_final_jacobian = (i & 64); - } -} - TEST(SolverImpl, NoParameterBlocks) { ProblemImpl problem_impl; Solver::Options options; @@ -845,34 +779,12 @@ EXPECT_EQ(summary.error, "Problem contains no residual blocks."); } -class FailingCostFunction : public SizedCostFunction<1, 1> { - public: - virtual bool Evaluate(double const* const* parameters, - double* residuals, - double** jacobians) const { - return false; - } -}; - -TEST(SolverImpl, InitialCostEvaluationFails) { - ProblemImpl problem_impl; - Solver::Options options; - Solver::Summary summary; - double x; - options.return_initial_residuals = true; - problem_impl.AddResidualBlock(new FailingCostFunction, NULL, &x); - SolverImpl::Solve(options, 
&problem_impl, &summary); - EXPECT_EQ(summary.termination_type, NUMERICAL_FAILURE); - EXPECT_EQ(summary.error, "Unable to evaluate the initial cost."); -} TEST(SolverImpl, ProblemIsConstant) { ProblemImpl problem_impl; Solver::Options options; Solver::Summary summary; double x = 1; - options.return_initial_residuals = true; - options.return_final_residuals = true; problem_impl.AddResidualBlock(new UnaryIdentityCostFunction, NULL, &x); problem_impl.SetParameterBlockConstant(&x); SolverImpl::Solve(options, &problem_impl, &summary);
diff --git a/internal/ceres/sparse_normal_cholesky_solver.cc b/internal/ceres/sparse_normal_cholesky_solver.cc index 6fae204..dd05f0c 100644 --- a/internal/ceres/sparse_normal_cholesky_solver.cc +++ b/internal/ceres/sparse_normal_cholesky_solver.cc
@@ -132,7 +132,7 @@ // off of Jt to compute the Cholesky factorization of the normal // equations. cs_di* A2 = cs_transpose(&At, 1); - cs_di* AtA = cs_multiply(&At,A2); + cs_di* AtA = cs_multiply(&At, A2); cxsparse_.Free(A2); if (per_solve_options.D != NULL) {
diff --git a/internal/ceres/stl_util.h b/internal/ceres/stl_util.h index b23e7c1..08f15ec 100644 --- a/internal/ceres/stl_util.h +++ b/internal/ceres/stl_util.h
@@ -31,6 +31,8 @@ #ifndef CERES_INTERNAL_STL_UTIL_H_ #define CERES_INTERNAL_STL_UTIL_H_ +#include <algorithm> + namespace ceres { // STLDeleteContainerPointers()
diff --git a/internal/ceres/stringprintf.h b/internal/ceres/stringprintf.h index f2f907a..cd1be14 100644 --- a/internal/ceres/stringprintf.h +++ b/internal/ceres/stringprintf.h
@@ -65,17 +65,17 @@ // Return a C++ string. extern string StringPrintf(const char* format, ...) // Tell the compiler to do printf format string checking. - CERES_PRINTF_ATTRIBUTE(1,2); + CERES_PRINTF_ATTRIBUTE(1, 2); // Store result into a supplied string and return it. extern const string& SStringPrintf(string* dst, const char* format, ...) // Tell the compiler to do printf format string checking. - CERES_PRINTF_ATTRIBUTE(2,3); + CERES_PRINTF_ATTRIBUTE(2, 3); // Append result to a supplied string. extern void StringAppendF(string* dst, const char* format, ...) // Tell the compiler to do printf format string checking. - CERES_PRINTF_ATTRIBUTE(2,3); + CERES_PRINTF_ATTRIBUTE(2, 3); // Lower-level routine that takes a va_list and appends to a specified string. // All other routines are just convenience wrappers around it.
diff --git a/internal/ceres/suitesparse.cc b/internal/ceres/suitesparse.cc index cf3c48f..d200aeb 100644 --- a/internal/ceres/suitesparse.cc +++ b/internal/ceres/suitesparse.cc
@@ -135,10 +135,11 @@ return AnalyzeCholeskyWithUserOrdering(A, ordering); } -cholmod_factor* SuiteSparse::AnalyzeCholeskyWithUserOrdering(cholmod_sparse* A, - const vector<int>& ordering) { +cholmod_factor* SuiteSparse::AnalyzeCholeskyWithUserOrdering( + cholmod_sparse* A, + const vector<int>& ordering) { CHECK_EQ(ordering.size(), A->nrow); - cc_.nmethods = 1 ; + cc_.nmethods = 1; cc_.method[0].ordering = CHOLMOD_GIVEN; cholmod_factor* factor = cholmod_analyze_p(A, const_cast<int*>(&ordering[0]), NULL, 0, &cc_);
diff --git a/internal/ceres/suitesparse.h b/internal/ceres/suitesparse.h index eb691c0..3fe7908 100644 --- a/internal/ceres/suitesparse.h +++ b/internal/ceres/suitesparse.h
@@ -39,9 +39,9 @@ #include <string> #include <vector> -#include <glog/logging.h> -#include "cholmod.h" #include "ceres/internal/port.h" +#include "cholmod.h" +#include "glog/logging.h" namespace ceres { namespace internal {
diff --git a/internal/ceres/system_test.cc b/internal/ceres/system_test.cc index 597c7ef..095b51e 100644 --- a/internal/ceres/system_test.cc +++ b/internal/ceres/system_test.cc
@@ -121,7 +121,7 @@ const double max_abs_difference) { int num_configurations = configurations.size(); vector<SystemTestProblem*> problems; - vector<Solver::Summary> summaries(num_configurations); + vector<vector<double> > final_residuals(num_configurations); for (int i = 0; i < num_configurations; ++i) { SystemTestProblem* system_test_problem = new SystemTestProblem(); @@ -135,7 +135,6 @@ options.preconditioner_type = config.preconditioner_type; options.num_threads = config.num_threads; options.num_linear_solver_threads = config.num_threads; - options.return_final_residuals = true; if (config.use_automatic_ordering) { delete options.linear_solver_ordering; @@ -145,11 +144,20 @@ LOG(INFO) << "Running solver configuration: " << config.ToString(); + Solver::Summary summary; Solve(options, system_test_problem->mutable_problem(), - &summaries[i]); + &summary); - CHECK_NE(summaries[i].termination_type, ceres::NUMERICAL_FAILURE) + system_test_problem + ->mutable_problem() + ->Evaluate(Problem::EvaluateOptions(), + NULL, + &final_residuals[i], + NULL, + NULL); + + CHECK_NE(summary.termination_type, ceres::NUMERICAL_FAILURE) << "Solver configuration " << i << " failed."; problems.push_back(system_test_problem); @@ -161,8 +169,8 @@ // the same residuals at two completely different positions in // parameter space. if (i > 0) { - const vector<double>& reference_residuals = summaries[0].final_residuals; - const vector<double>& current_residuals = summaries[i].final_residuals; + const vector<double>& reference_residuals = final_residuals[0]; + const vector<double>& current_residuals = final_residuals[i]; for (int j = 0; j < reference_residuals.size(); ++j) { EXPECT_NEAR(current_residuals[j],
diff --git a/internal/ceres/trust_region_minimizer.cc b/internal/ceres/trust_region_minimizer.cc index fc405f5..981c60a 100644 --- a/internal/ceres/trust_region_minimizer.cc +++ b/internal/ceres/trust_region_minimizer.cc
@@ -189,7 +189,7 @@ + summary->preprocessor_time_in_seconds; summary->iterations.push_back(iteration_summary); - int num_consecutive_invalid_steps = 0; + int num_consecutive_invalid_steps = 0; while (true) { if (!RunCallbacks(options.callbacks, iteration_summary, summary)) { return; @@ -323,9 +323,9 @@ options.inner_iteration_minimizer->Minimize(options, inner_iteration_x.data(), &inner_iteration_summary); - if(!evaluator->Evaluate(inner_iteration_x.data(), - &new_cost, - NULL, NULL, NULL)) { + if (!evaluator->Evaluate(inner_iteration_x.data(), + &new_cost, + NULL, NULL, NULL)) { VLOG(2) << "Inner iteration failed."; new_cost = x_plus_delta_cost; } else { @@ -422,7 +422,8 @@ NULL, jacobian)) { summary->termination_type = NUMERICAL_FAILURE; - summary->error = "Terminating: Residual and Jacobian evaluation failed."; + summary->error = + "Terminating: Residual and Jacobian evaluation failed."; LOG(WARNING) << summary->error; return; } @@ -435,7 +436,8 @@ summary->termination_type = GRADIENT_TOLERANCE; VLOG(1) << "Terminating: Gradient tolerance reached." << "Relative gradient max norm: " - << iteration_summary.gradient_max_norm / initial_gradient_max_norm + << (iteration_summary.gradient_max_norm / + initial_gradient_max_norm) << " <= " << options_.gradient_tolerance; return; }
diff --git a/internal/ceres/trust_region_minimizer.h b/internal/ceres/trust_region_minimizer.h index 173b0f3..9a02284 100644 --- a/internal/ceres/trust_region_minimizer.h +++ b/internal/ceres/trust_region_minimizer.h
@@ -52,10 +52,10 @@ private: void Init(const Minimizer::Options& options); void EstimateScale(const SparseMatrix& jacobian, double* scale) const; - bool MaybeDumpLinearLeastSquaresProblem( const int iteration, - const SparseMatrix* jacobian, - const double* residuals, - const double* step) const; + bool MaybeDumpLinearLeastSquaresProblem(const int iteration, + const SparseMatrix* jacobian, + const double* residuals, + const double* step) const; Minimizer::Options options_; };
diff --git a/internal/ceres/trust_region_minimizer_test.cc b/internal/ceres/trust_region_minimizer_test.cc index a82dea7..4bc9409 100644 --- a/internal/ceres/trust_region_minimizer_test.cc +++ b/internal/ceres/trust_region_minimizer_test.cc
@@ -258,7 +258,8 @@ } TEST(TrustRegionMinimizer, PowellsSingularFunctionUsingDogleg) { - // The following two cases are excluded because they encounter a local minimum. + // The following two cases are excluded because they encounter a + // local minimum. // // IsTrustRegionSolveSuccessful<true, true, false, true >(kStrategy); // IsTrustRegionSolveSuccessful<true, true, true, true >(kStrategy);
diff --git a/internal/ceres/trust_region_strategy.cc b/internal/ceres/trust_region_strategy.cc index 89bc19d..c68269d 100644 --- a/internal/ceres/trust_region_strategy.cc +++ b/internal/ceres/trust_region_strategy.cc
@@ -1,3 +1,35 @@ +// Ceres Solver - A fast non-linear least squares minimizer +// Copyright 2012, 2013 Google Inc. All rights reserved. +// http://code.google.com/p/ceres-solver/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// * Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// * Neither the name of Google Inc. nor the names of its contributors may be +// used to endorse or promote products derived from this software without +// specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +// POSSIBILITY OF SUCH DAMAGE. +// +// +// Author: sameeragarwal@google.com (Sameer Agarwal) +// keir@google.com (Keir Mierle) + #include "ceres/trust_region_strategy.h" #include "ceres/dogleg_strategy.h" #include "ceres/levenberg_marquardt_strategy.h"
diff --git a/internal/ceres/trust_region_strategy.h b/internal/ceres/trust_region_strategy.h index 391da97..f150594 100644 --- a/internal/ceres/trust_region_strategy.h +++ b/internal/ceres/trust_region_strategy.h
@@ -52,7 +52,7 @@ // radius to scale the damping term, which controls the step size, but // does not set a hard limit on its size. class TrustRegionStrategy { -public: + public: struct Options { Options() : trust_region_strategy_type(LEVENBERG_MARQUARDT),
diff --git a/internal/ceres/wall_time.h b/internal/ceres/wall_time.h index 1dbd6e9..45f65ca 100644 --- a/internal/ceres/wall_time.h +++ b/internal/ceres/wall_time.h
@@ -72,7 +72,7 @@ // Total: time3 time1 + time2 + time3; class EventLogger { public: - EventLogger(const string& logger_name); + explicit EventLogger(const string& logger_name); ~EventLogger(); void AddEvent(const string& event_name);
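The EventLogger change above is the usual cpplint fix of marking a single-argument constructor explicit. As a brief sketch of what this now disallows (illustrative only):

    EventLogger logger("trust_region_minimizer");  // direct construction: fine
    EventLogger oops = "trust_region_minimizer";   // copy-initialization via the
                                                   // implicit conversion: now a
                                                   // compile error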