Add Problem::EvaluateResidualBlock. This method gives the user the ability to evaluate a given residual block. A couple of minor cleanups: Problem::problem_impl_ -> Problem::impl_; NULL -> nullptr. https://github.com/ceres-solver/ceres-solver/issues/417 Change-Id: I6dd94762c475fa264c387b8c93d516f6e06fe832
diff --git a/docs/source/nnls_modeling.rst b/docs/source/nnls_modeling.rst index faa3c26..299f859 100644 --- a/docs/source/nnls_modeling.rst +++ b/docs/source/nnls_modeling.rst
@@ -1753,6 +1753,39 @@ Get the :class:`LossFunction` for the given residual block. +.. function:: bool EvaluateResidualBlock(ResidualBlockId residual_block_id, bool apply_loss_function, double* cost, double* residuals, double** jacobians) const; + + Evaluates the residual block, storing the scalar cost in ``cost``, the + residual components in ``residuals``, and the jacobians between the + parameters and residuals in ``jacobians[i]``, in row-major order. + + If ``residuals`` is ``nullptr``, the residuals are not computed. + + If ``jacobians`` is ``nullptr``, no Jacobians are computed. If + ``jacobians[i]`` is ``nullptr``, then the Jacobian for that + parameter block is not computed. + + It is not okay to request the Jacobian w.r.t a parameter block + that is constant. + + The return value indicates the success or failure. Even if the + function returns false, the caller should expect the output + memory locations to have been modified. + + The returned cost and jacobians have had robustification and local + parameterizations applied already; for example, the jacobian for a + 4-dimensional quaternion parameter using the + :class:`QuaternionParameterization` is ``num_residuals x 3`` + instead of ``num_residuals x 4``. + + ``apply_loss_function``, as the name implies, allows the user to + switch the application of the loss function on and off. + + .. NOTE:: + + TODO(sameeragarwal): Clarify interaction with IterationCallback + once that cleanup is done. + .. function:: bool Problem::Evaluate(const Problem::EvaluateOptions& options, double* cost, vector<double>* residuals, vector<double>* gradient, CRSMatrix* jacobian) Evaluate a :class:`Problem`. Any of the output pointers can be @@ -1840,6 +1873,7 @@ Number of threads to use. (Requires OpenMP). +.. ``rotation.h`` ==============
diff --git a/include/ceres/problem.h b/include/ceres/problem.h index 503e0fe..f8f1764 100644 --- a/include/ceres/problem.h +++ b/include/ceres/problem.h
@@ -114,8 +114,8 @@ // // Problem problem; // -// problem.AddResidualBlock(new MyUnaryCostFunction(...), NULL, x1); -// problem.AddResidualBlock(new MyBinaryCostFunction(...), NULL, x2, x3); +// problem.AddResidualBlock(new MyUnaryCostFunction(...), nullptr, x1); +// problem.AddResidualBlock(new MyBinaryCostFunction(...), nullptr, x2, x3); // // Please see cost_function.h for details of the CostFunction object. class CERES_EXPORT Problem { @@ -161,7 +161,7 @@ // A Ceres global context to use for solving this problem. This may help to // reduce computation time as Ceres can reuse expensive objects to create. - // The context object can be NULL, in which case Ceres may create one. + // The context object can be nullptr, in which case Ceres may create one. // // Ceres does NOT take ownership of the pointer. Context* context = nullptr; @@ -181,7 +181,7 @@ // parameter blocks it expects. The function checks that these match // the sizes of the parameter blocks listed in parameter_blocks. The // program aborts if a mismatch is detected. loss_function can be - // NULL, in which case the cost of the term is just the squared norm + // nullptr, in which case the cost of the term is just the squared norm // of the residuals. // // The user has the option of explicitly adding the parameter blocks @@ -210,8 +210,8 @@ // // Problem problem; // - // problem.AddResidualBlock(new MyUnaryCostFunction(...), NULL, x1); - // problem.AddResidualBlock(new MyBinaryCostFunction(...), NULL, x2, x1); + // problem.AddResidualBlock(new MyUnaryCostFunction(...), nullptr, x1); + // problem.AddResidualBlock(new MyBinaryCostFunction(...), nullptr, x2, x1); // // Add a residual block by listing the parameter block pointers // directly instead of wapping them in a container. @@ -301,7 +301,7 @@ // Get the local parameterization object associated with this // parameter block. If there is no parameterization object - // associated then NULL is returned. + // associated then nullptr is returned. 
const LocalParameterization* GetParameterization(double* values) const; // Set the lower/upper bound for the parameter at position "index". @@ -361,7 +361,7 @@ const CostFunction* GetCostFunctionForResidualBlock( const ResidualBlockId residual_block) const; - // Get the LossFunction for the given residual block. Returns NULL + // Get the LossFunction for the given residual block. Returns nullptr // if no loss function is associated with this residual block. const LossFunction* GetLossFunctionForResidualBlock( const ResidualBlockId residual_block) const; @@ -415,7 +415,7 @@ int num_threads = 1; }; - // Evaluate Problem. Any of the output pointers can be NULL. Which + // Evaluate Problem. Any of the output pointers can be nullptr. Which // residual blocks and parameter blocks are used is controlled by // the EvaluateOptions struct above. // @@ -425,16 +425,16 @@ // // Problem problem; // double x = 1; - // problem.AddResidualBlock(new MyCostFunction, NULL, &x); + // problem.AddResidualBlock(new MyCostFunction, nullptr, &x); // // double cost = 0.0; - // problem.Evaluate(Problem::EvaluateOptions(), &cost, NULL, NULL, NULL); + // problem.Evaluate(Problem::EvaluateOptions(), &cost, nullptr, nullptr, nullptr); // // The cost is evaluated at x = 1. If you wish to evaluate the // problem at x = 2, then // // x = 2; - // problem.Evaluate(Problem::EvaluateOptions(), &cost, NULL, NULL, NULL); + // problem.Evaluate(Problem::EvaluateOptions(), &cost, nullptr, nullptr, nullptr); // // is the way to do so. // @@ -454,10 +454,43 @@ std::vector<double>* gradient, CRSMatrix* jacobian); + // Evaluates the residual block, storing the scalar cost in *cost, + // the residual components in *residuals, and the jacobians between + // the parameters and residuals in jacobians[i], in row-major order. + // + // If residuals is nullptr, the residuals are not computed. + // + // If jacobians is nullptr, no Jacobians are computed. 
If + // jacobians[i] is nullptr, then the Jacobian for that parameter + // block is not computed. + // + // It is not okay to request the Jacobian w.r.t a parameter block + // that is constant. + // + // The return value indicates the success or failure. Even if the + // function returns false, the caller should expect the output + // memory locations to have been modified. + // + // The returned cost and jacobians have had robustification and + // local parameterizations applied already; for example, the + // jacobian for a 4-dimensional quaternion parameter using the + // "QuaternionParameterization" is num_residuals by 3 instead of + // num_residuals by 4. + // + // apply_loss_function as the name implies allows the user to switch + // the application of the loss function on and off. + // + // TODO(sameeragarwal): Clarify interaction with IterationCallback + // once that cleanup is done. + bool EvaluateResidualBlock(ResidualBlockId residual_block_id, + bool apply_loss_function, + double* cost, + double* residuals, + double** jacobians) const; private: friend class Solver; friend class Covariance; - std::unique_ptr<internal::ProblemImpl> problem_impl_; + std::unique_ptr<internal::ProblemImpl> impl_; }; } // namespace ceres
diff --git a/internal/ceres/covariance.cc b/internal/ceres/covariance.cc index 068cd9c..ee29595 100644 --- a/internal/ceres/covariance.cc +++ b/internal/ceres/covariance.cc
@@ -52,13 +52,13 @@ bool Covariance::Compute( const vector<pair<const double*, const double*>>& covariance_blocks, Problem* problem) { - return impl_->Compute(covariance_blocks, problem->problem_impl_.get()); + return impl_->Compute(covariance_blocks, problem->impl_.get()); } bool Covariance::Compute( const vector<const double*>& parameter_blocks, Problem* problem) { - return impl_->Compute(parameter_blocks, problem->problem_impl_.get()); + return impl_->Compute(parameter_blocks, problem->impl_.get()); } bool Covariance::GetCovarianceBlock(const double* parameter_block1,
diff --git a/internal/ceres/problem.cc b/internal/ceres/problem.cc index 6939b46..110bb14 100644 --- a/internal/ceres/problem.cc +++ b/internal/ceres/problem.cc
@@ -32,6 +32,7 @@ #include "ceres/problem.h" #include <vector> + #include "ceres/crs_matrix.h" #include "ceres/problem_impl.h" @@ -39,92 +40,87 @@ using std::vector; -Problem::Problem() : problem_impl_(new internal::ProblemImpl) {} +Problem::Problem() : impl_(new internal::ProblemImpl) {} Problem::Problem(const Problem::Options& options) - : problem_impl_(new internal::ProblemImpl(options)) {} + : impl_(new internal::ProblemImpl(options)) {} Problem::~Problem() {} ResidualBlockId Problem::AddResidualBlock( CostFunction* cost_function, LossFunction* loss_function, const vector<double*>& parameter_blocks) { - return problem_impl_->AddResidualBlock( - cost_function, - loss_function, - parameter_blocks.data(), - static_cast<int>(parameter_blocks.size())); + return impl_->AddResidualBlock(cost_function, + loss_function, + parameter_blocks.data(), + static_cast<int>(parameter_blocks.size())); } -ResidualBlockId Problem::AddResidualBlock( - CostFunction* cost_function, - LossFunction* loss_function, - double* const* const parameter_blocks, - int num_parameter_blocks) { - return problem_impl_->AddResidualBlock(cost_function, - loss_function, - parameter_blocks, - num_parameter_blocks); +ResidualBlockId Problem::AddResidualBlock(CostFunction* cost_function, + LossFunction* loss_function, + double* const* const parameter_blocks, + int num_parameter_blocks) { + return impl_->AddResidualBlock( + cost_function, loss_function, parameter_blocks, num_parameter_blocks); } void Problem::AddParameterBlock(double* values, int size) { - problem_impl_->AddParameterBlock(values, size); + impl_->AddParameterBlock(values, size); } void Problem::AddParameterBlock(double* values, int size, LocalParameterization* local_parameterization) { - problem_impl_->AddParameterBlock(values, size, local_parameterization); + impl_->AddParameterBlock(values, size, local_parameterization); } void Problem::RemoveResidualBlock(ResidualBlockId residual_block) { - 
problem_impl_->RemoveResidualBlock(residual_block); + impl_->RemoveResidualBlock(residual_block); } void Problem::RemoveParameterBlock(double* values) { - problem_impl_->RemoveParameterBlock(values); + impl_->RemoveParameterBlock(values); } void Problem::SetParameterBlockConstant(double* values) { - problem_impl_->SetParameterBlockConstant(values); + impl_->SetParameterBlockConstant(values); } void Problem::SetParameterBlockVariable(double* values) { - problem_impl_->SetParameterBlockVariable(values); + impl_->SetParameterBlockVariable(values); } bool Problem::IsParameterBlockConstant(double* values) const { - return problem_impl_->IsParameterBlockConstant(values); + return impl_->IsParameterBlockConstant(values); } void Problem::SetParameterization( - double* values, - LocalParameterization* local_parameterization) { - problem_impl_->SetParameterization(values, local_parameterization); + double* values, LocalParameterization* local_parameterization) { + impl_->SetParameterization(values, local_parameterization); } const LocalParameterization* Problem::GetParameterization( double* values) const { - return problem_impl_->GetParameterization(values); + return impl_->GetParameterization(values); } void Problem::SetParameterLowerBound(double* values, int index, double lower_bound) { - problem_impl_->SetParameterLowerBound(values, index, lower_bound); + impl_->SetParameterLowerBound(values, index, lower_bound); } void Problem::SetParameterUpperBound(double* values, int index, double upper_bound) { - problem_impl_->SetParameterUpperBound(values, index, upper_bound); + impl_->SetParameterUpperBound(values, index, upper_bound); } double Problem::GetParameterUpperBound(double* values, int index) const { - return problem_impl_->GetParameterUpperBound(values, index); + return impl_->GetParameterUpperBound(values, index); } double Problem::GetParameterLowerBound(double* values, int index) const { - return problem_impl_->GetParameterLowerBound(values, index); + return 
impl_->GetParameterLowerBound(values, index); } bool Problem::Evaluate(const EvaluateOptions& evaluate_options, @@ -132,72 +128,66 @@ vector<double>* residuals, vector<double>* gradient, CRSMatrix* jacobian) { - return problem_impl_->Evaluate(evaluate_options, - cost, - residuals, - gradient, - jacobian); + return impl_->Evaluate(evaluate_options, cost, residuals, gradient, jacobian); } -int Problem::NumParameterBlocks() const { - return problem_impl_->NumParameterBlocks(); +bool Problem::EvaluateResidualBlock(ResidualBlockId residual_block_id, + bool apply_loss_function, + double* cost, + double* residuals, + double** jacobians) const { + return impl_->EvaluateResidualBlock( + residual_block_id, apply_loss_function, cost, residuals, jacobians); } -int Problem::NumParameters() const { - return problem_impl_->NumParameters(); -} +int Problem::NumParameterBlocks() const { return impl_->NumParameterBlocks(); } -int Problem::NumResidualBlocks() const { - return problem_impl_->NumResidualBlocks(); -} +int Problem::NumParameters() const { return impl_->NumParameters(); } -int Problem::NumResiduals() const { - return problem_impl_->NumResiduals(); -} +int Problem::NumResidualBlocks() const { return impl_->NumResidualBlocks(); } + +int Problem::NumResiduals() const { return impl_->NumResiduals(); } int Problem::ParameterBlockSize(const double* parameter_block) const { - return problem_impl_->ParameterBlockSize(parameter_block); + return impl_->ParameterBlockSize(parameter_block); } int Problem::ParameterBlockLocalSize(const double* parameter_block) const { - return problem_impl_->ParameterBlockLocalSize(parameter_block); + return impl_->ParameterBlockLocalSize(parameter_block); } bool Problem::HasParameterBlock(const double* values) const { - return problem_impl_->HasParameterBlock(values); + return impl_->HasParameterBlock(values); } void Problem::GetParameterBlocks(vector<double*>* parameter_blocks) const { - problem_impl_->GetParameterBlocks(parameter_blocks); + 
impl_->GetParameterBlocks(parameter_blocks); } void Problem::GetResidualBlocks( vector<ResidualBlockId>* residual_blocks) const { - problem_impl_->GetResidualBlocks(residual_blocks); + impl_->GetResidualBlocks(residual_blocks); } void Problem::GetParameterBlocksForResidualBlock( const ResidualBlockId residual_block, vector<double*>* parameter_blocks) const { - problem_impl_->GetParameterBlocksForResidualBlock(residual_block, - parameter_blocks); + impl_->GetParameterBlocksForResidualBlock(residual_block, parameter_blocks); } const CostFunction* Problem::GetCostFunctionForResidualBlock( const ResidualBlockId residual_block) const { - return problem_impl_->GetCostFunctionForResidualBlock(residual_block); + return impl_->GetCostFunctionForResidualBlock(residual_block); } const LossFunction* Problem::GetLossFunctionForResidualBlock( const ResidualBlockId residual_block) const { - return problem_impl_->GetLossFunctionForResidualBlock(residual_block); + return impl_->GetLossFunctionForResidualBlock(residual_block); } void Problem::GetResidualBlocksForParameterBlock( - const double* values, - vector<ResidualBlockId>* residual_blocks) const { - problem_impl_->GetResidualBlocksForParameterBlock(values, - residual_blocks); + const double* values, vector<ResidualBlockId>* residual_blocks) const { + impl_->GetResidualBlocksForParameterBlock(values, residual_blocks); } } // namespace ceres
diff --git a/internal/ceres/problem_impl.cc b/internal/ceres/problem_impl.cc index 40d5aa2..587aef6 100644 --- a/internal/ceres/problem_impl.cc +++ b/internal/ceres/problem_impl.cc
@@ -41,6 +41,7 @@ #include <utility> #include <vector> +#include "ceres/internal/fixed_array.h" #include "ceres/casts.h" #include "ceres/compressed_row_jacobian_writer.h" #include "ceres/compressed_row_sparse_matrix.h" @@ -771,6 +772,37 @@ return status; } +bool ProblemImpl::EvaluateResidualBlock(ResidualBlock* residual_block, + bool apply_loss_function, + double* cost, + double* residuals, + double** jacobians) const { + ParameterBlock* const* parameter_blocks = residual_block->parameter_blocks(); + const int num_parameter_blocks = residual_block->NumParameterBlocks(); + for (int i = 0; i < num_parameter_blocks; ++i) { + ParameterBlock* parameter_block = parameter_blocks[i]; + if (parameter_block->IsConstant()) { + if (jacobians != nullptr && jacobians[i] != nullptr) { + LOG(ERROR) << "Jacobian requested for parameter block : " << i + << ". But the parameter block is marked constant."; + return false; + } + } else { + CHECK(parameter_block->SetState(parameter_block->user_state())) + << "Congratulations, you found a Ceres bug! Please report this error " + << "to the developers."; + } + } + + double dummy_cost = 0.0; + FixedArray<double> scratch(residual_block->NumScratchDoublesForEvaluate()); + return residual_block->Evaluate(apply_loss_function, + cost ? cost : &dummy_cost, + residuals, + jacobians, + scratch.data()); +} + int ProblemImpl::NumParameterBlocks() const { return program_->NumParameterBlocks(); }
diff --git a/internal/ceres/problem_impl.h b/internal/ceres/problem_impl.h index eabeaed..d0fab3f 100644 --- a/internal/ceres/problem_impl.h +++ b/internal/ceres/problem_impl.h
@@ -122,6 +122,12 @@ std::vector<double>* gradient, CRSMatrix* jacobian); + bool EvaluateResidualBlock(ResidualBlock* residual_block, + bool apply_loss_function, + double* cost, + double* residuals, + double** jacobians) const; + int NumParameterBlocks() const; int NumParameters() const; int NumResidualBlocks() const;
diff --git a/internal/ceres/problem_test.cc b/internal/ceres/problem_test.cc index 736e295..03e8da7 100644 --- a/internal/ceres/problem_test.cc +++ b/internal/ceres/problem_test.cc
@@ -30,9 +30,10 @@ // keir@google.com (Keir Mierle) #include "ceres/problem.h" -#include "ceres/problem_impl.h" #include <memory> + +#include "ceres/autodiff_cost_function.h" #include "ceres/casts.h" #include "ceres/cost_function.h" #include "ceres/crs_matrix.h" @@ -42,6 +43,7 @@ #include "ceres/loss_function.h" #include "ceres/map_util.h" #include "ceres/parameter_block.h" +#include "ceres/problem_impl.h" #include "ceres/program.h" #include "ceres/sized_cost_function.h" #include "ceres/sparse_matrix.h" @@ -1528,6 +1530,498 @@ CheckAllEvaluationCombinations(Problem::EvaluateOptions(), expected); } +struct IdentityFunctor { + template <typename T> + bool operator()(const T* x, const T* y, T* residuals) const { + residuals[0] = x[0]; + residuals[1] = x[1]; + residuals[2] = y[0]; + residuals[3] = y[1]; + residuals[4] = y[2]; + return true; + } + + static CostFunction* Create() { + return new AutoDiffCostFunction<IdentityFunctor, 5, 2, 3>( + new IdentityFunctor); + } +}; + +class ProblemEvaluateResidualBlockTest : public ::testing::Test { + public: + static constexpr bool kApplyLossFunction = true; + static constexpr bool kDoNotApplyLossFunction = false; + static double kLossFunctionScale; + + protected: + void SetUp() { + loss_function_ = new ScaledLoss(nullptr, 2.0, TAKE_OWNERSHIP); + } + + LossFunction* loss_function_; + ProblemImpl problem_; + double x_[2] = {1, 2}; + double y_[3] = {1, 2, 3}; +}; + +double ProblemEvaluateResidualBlockTest::kLossFunctionScale = 2.0; + +TEST_F(ProblemEvaluateResidualBlockTest, + OneResidualBlockNoLossFunctionFullEval) { + ResidualBlockId residual_block_id = + problem_.AddResidualBlock(IdentityFunctor::Create(), nullptr, x_, y_); + Vector expected_f(5); + expected_f << 1, 2, 1, 2, 3; + Matrix expected_dfdx = Matrix::Zero(5, 2); + expected_dfdx.block(0, 0, 2, 2) = Matrix::Identity(2, 2); + Matrix expected_dfdy = Matrix::Zero(5, 3); + expected_dfdy.block(2, 0, 3, 3) = Matrix::Identity(3, 3); + double expected_cost = 
expected_f.squaredNorm() / 2.0; + + double actual_cost; + Vector actual_f(5); + Matrix actual_dfdx(5, 2); + Matrix actual_dfdy(5, 3); + double* jacobians[2] = {actual_dfdx.data(), actual_dfdy.data()}; + EXPECT_TRUE(problem_.EvaluateResidualBlock(residual_block_id, + kApplyLossFunction, + &actual_cost, + actual_f.data(), + jacobians)); + + EXPECT_NEAR(std::abs(expected_cost - actual_cost) / actual_cost, + 0, + std::numeric_limits<double>::epsilon()) + << actual_cost; + EXPECT_NEAR((expected_f - actual_f).norm() / actual_f.norm(), + 0, + std::numeric_limits<double>::epsilon()) + << actual_f; + EXPECT_NEAR((expected_dfdx - actual_dfdx).norm() / actual_dfdx.norm(), + 0, + std::numeric_limits<double>::epsilon()) + << actual_dfdx; + EXPECT_NEAR((expected_dfdy - actual_dfdy).norm() / actual_dfdy.norm(), + 0, + std::numeric_limits<double>::epsilon()) + << actual_dfdy; +} + +TEST_F(ProblemEvaluateResidualBlockTest, + OneResidualBlockNoLossFunctionNullEval) { + ResidualBlockId residual_block_id = + problem_.AddResidualBlock(IdentityFunctor::Create(), nullptr, x_, y_); + EXPECT_TRUE(problem_.EvaluateResidualBlock( + residual_block_id, kApplyLossFunction, nullptr, nullptr, nullptr)); +} + +TEST_F(ProblemEvaluateResidualBlockTest, OneResidualBlockNoLossFunctionCost) { + ResidualBlockId residual_block_id = + problem_.AddResidualBlock(IdentityFunctor::Create(), nullptr, x_, y_); + Vector expected_f(5); + expected_f << 1, 2, 1, 2, 3; + double expected_cost = expected_f.squaredNorm() / 2.0; + + double actual_cost; + EXPECT_TRUE(problem_.EvaluateResidualBlock( + residual_block_id, kApplyLossFunction, &actual_cost, nullptr, nullptr)); + + EXPECT_NEAR(std::abs(expected_cost - actual_cost) / actual_cost, + 0, + std::numeric_limits<double>::epsilon()) + << actual_cost; +} + +TEST_F(ProblemEvaluateResidualBlockTest, + OneResidualBlockNoLossFunctionCostAndResidual) { + ResidualBlockId residual_block_id = + problem_.AddResidualBlock(IdentityFunctor::Create(), nullptr, x_, y_); + Vector 
expected_f(5); + expected_f << 1, 2, 1, 2, 3; + double expected_cost = expected_f.squaredNorm() / 2.0; + + double actual_cost; + Vector actual_f(5); + EXPECT_TRUE(problem_.EvaluateResidualBlock(residual_block_id, + kApplyLossFunction, + &actual_cost, + actual_f.data(), + nullptr)); + + EXPECT_NEAR(std::abs(expected_cost - actual_cost) / actual_cost, + 0, + std::numeric_limits<double>::epsilon()) + << actual_cost; + EXPECT_NEAR((expected_f - actual_f).norm() / actual_f.norm(), + 0, + std::numeric_limits<double>::epsilon()) + << actual_f; +} + +TEST_F(ProblemEvaluateResidualBlockTest, + OneResidualBlockNoLossFunctionCostResidualAndOneJacobian) { + ResidualBlockId residual_block_id = + problem_.AddResidualBlock(IdentityFunctor::Create(), nullptr, x_, y_); + Vector expected_f(5); + expected_f << 1, 2, 1, 2, 3; + Matrix expected_dfdx = Matrix::Zero(5, 2); + expected_dfdx.block(0, 0, 2, 2) = Matrix::Identity(2, 2); + double expected_cost = expected_f.squaredNorm() / 2.0; + + double actual_cost; + Vector actual_f(5); + Matrix actual_dfdx(5, 2); + double* jacobians[2] = {actual_dfdx.data(), nullptr}; + EXPECT_TRUE(problem_.EvaluateResidualBlock(residual_block_id, + kApplyLossFunction, + &actual_cost, + actual_f.data(), + jacobians)); + + EXPECT_NEAR(std::abs(expected_cost - actual_cost) / actual_cost, + 0, + std::numeric_limits<double>::epsilon()) + << actual_cost; + EXPECT_NEAR((expected_f - actual_f).norm() / actual_f.norm(), + 0, + std::numeric_limits<double>::epsilon()) + << actual_f; + EXPECT_NEAR((expected_dfdx - actual_dfdx).norm() / actual_dfdx.norm(), + 0, + std::numeric_limits<double>::epsilon()) + << actual_dfdx; +} + +TEST_F(ProblemEvaluateResidualBlockTest, + OneResidualBlockNoLossFunctionResidual) { + ResidualBlockId residual_block_id = + problem_.AddResidualBlock(IdentityFunctor::Create(), nullptr, x_, y_); + Vector expected_f(5); + expected_f << 1, 2, 1, 2, 3; + Vector actual_f(5); + EXPECT_TRUE(problem_.EvaluateResidualBlock(residual_block_id, + 
kApplyLossFunction, + nullptr, + actual_f.data(), + nullptr)); + + EXPECT_NEAR((expected_f - actual_f).norm() / actual_f.norm(), + 0, + std::numeric_limits<double>::epsilon()) + << actual_f; +} + +TEST_F(ProblemEvaluateResidualBlockTest, OneResidualBlockWithLossFunction) { + ResidualBlockId residual_block_id = problem_.AddResidualBlock( + IdentityFunctor::Create(), loss_function_, x_, y_); + Vector expected_f(5); + expected_f << 1, 2, 1, 2, 3; + expected_f *= std::sqrt(kLossFunctionScale); + Matrix expected_dfdx = Matrix::Zero(5, 2); + expected_dfdx.block(0, 0, 2, 2) = Matrix::Identity(2, 2); + expected_dfdx *= std::sqrt(kLossFunctionScale); + Matrix expected_dfdy = Matrix::Zero(5, 3); + expected_dfdy.block(2, 0, 3, 3) = Matrix::Identity(3, 3); + expected_dfdy *= std::sqrt(kLossFunctionScale); + double expected_cost = expected_f.squaredNorm() / 2.0; + + double actual_cost; + Vector actual_f(5); + Matrix actual_dfdx(5, 2); + Matrix actual_dfdy(5, 3); + double* jacobians[2] = {actual_dfdx.data(), actual_dfdy.data()}; + EXPECT_TRUE(problem_.EvaluateResidualBlock(residual_block_id, + kApplyLossFunction, + &actual_cost, + actual_f.data(), + jacobians)); + + EXPECT_NEAR(std::abs(expected_cost - actual_cost) / actual_cost, + 0, + std::numeric_limits<double>::epsilon()) + << actual_cost; + EXPECT_NEAR((expected_f - actual_f).norm() / actual_f.norm(), + 0, + std::numeric_limits<double>::epsilon()) + << actual_f; + EXPECT_NEAR((expected_dfdx - actual_dfdx).norm() / actual_dfdx.norm(), + 0, + std::numeric_limits<double>::epsilon()) + << actual_dfdx; + EXPECT_NEAR((expected_dfdy - actual_dfdy).norm() / actual_dfdy.norm(), + 0, + std::numeric_limits<double>::epsilon()) + << actual_dfdy; +} + +TEST_F(ProblemEvaluateResidualBlockTest, + OneResidualBlockWithLossFunctionDisabled) { + ResidualBlockId residual_block_id = problem_.AddResidualBlock( + IdentityFunctor::Create(), loss_function_, x_, y_); + Vector expected_f(5); + expected_f << 1, 2, 1, 2, 3; + Matrix expected_dfdx = 
Matrix::Zero(5, 2); + expected_dfdx.block(0, 0, 2, 2) = Matrix::Identity(2, 2); + Matrix expected_dfdy = Matrix::Zero(5, 3); + expected_dfdy.block(2, 0, 3, 3) = Matrix::Identity(3, 3); + double expected_cost = expected_f.squaredNorm() / 2.0; + + double actual_cost; + Vector actual_f(5); + Matrix actual_dfdx(5, 2); + Matrix actual_dfdy(5, 3); + double* jacobians[2] = {actual_dfdx.data(), actual_dfdy.data()}; + EXPECT_TRUE(problem_.EvaluateResidualBlock(residual_block_id, + kDoNotApplyLossFunction, + &actual_cost, + actual_f.data(), + jacobians)); + + EXPECT_NEAR(std::abs(expected_cost - actual_cost) / actual_cost, + 0, + std::numeric_limits<double>::epsilon()) + << actual_cost; + EXPECT_NEAR((expected_f - actual_f).norm() / actual_f.norm(), + 0, + std::numeric_limits<double>::epsilon()) + << actual_f; + EXPECT_NEAR((expected_dfdx - actual_dfdx).norm() / actual_dfdx.norm(), + 0, + std::numeric_limits<double>::epsilon()) + << actual_dfdx; + EXPECT_NEAR((expected_dfdy - actual_dfdy).norm() / actual_dfdy.norm(), + 0, + std::numeric_limits<double>::epsilon()) + << actual_dfdy; +} + +TEST_F(ProblemEvaluateResidualBlockTest, + OneResidualBlockWithOneLocalParameterization) { + ResidualBlockId residual_block_id = + problem_.AddResidualBlock(IdentityFunctor::Create(), nullptr, x_, y_); + problem_.SetParameterization(x_, new SubsetParameterization(2, {1})); + + Vector expected_f(5); + expected_f << 1, 2, 1, 2, 3; + Matrix expected_dfdx = Matrix::Zero(5, 1); + expected_dfdx.block(0, 0, 1, 1) = Matrix::Identity(1, 1); + Matrix expected_dfdy = Matrix::Zero(5, 3); + expected_dfdy.block(2, 0, 3, 3) = Matrix::Identity(3, 3); + double expected_cost = expected_f.squaredNorm() / 2.0; + + double actual_cost; + Vector actual_f(5); + Matrix actual_dfdx(5, 1); + Matrix actual_dfdy(5, 3); + double* jacobians[2] = {actual_dfdx.data(), actual_dfdy.data()}; + EXPECT_TRUE(problem_.EvaluateResidualBlock(residual_block_id, + kApplyLossFunction, + &actual_cost, + actual_f.data(), + jacobians)); + 
+ EXPECT_NEAR(std::abs(expected_cost - actual_cost) / actual_cost, + 0, + std::numeric_limits<double>::epsilon()) + << actual_cost; + EXPECT_NEAR((expected_f - actual_f).norm() / actual_f.norm(), + 0, + std::numeric_limits<double>::epsilon()) + << actual_f; + EXPECT_NEAR((expected_dfdx - actual_dfdx).norm() / actual_dfdx.norm(), + 0, + std::numeric_limits<double>::epsilon()) + << actual_dfdx; + EXPECT_NEAR((expected_dfdy - actual_dfdy).norm() / actual_dfdy.norm(), + 0, + std::numeric_limits<double>::epsilon()) + << actual_dfdy; +} + +TEST_F(ProblemEvaluateResidualBlockTest, + OneResidualBlockWithTwoLocalParameterizations) { + ResidualBlockId residual_block_id = + problem_.AddResidualBlock(IdentityFunctor::Create(), nullptr, x_, y_); + problem_.SetParameterization(x_, new SubsetParameterization(2, {1})); + problem_.SetParameterization(y_, new SubsetParameterization(3, {2})); + + Vector expected_f(5); + expected_f << 1, 2, 1, 2, 3; + Matrix expected_dfdx = Matrix::Zero(5, 1); + expected_dfdx.block(0, 0, 1, 1) = Matrix::Identity(1, 1); + Matrix expected_dfdy = Matrix::Zero(5, 2); + expected_dfdy.block(2, 0, 2, 2) = Matrix::Identity(2, 2); + double expected_cost = expected_f.squaredNorm() / 2.0; + + double actual_cost; + Vector actual_f(5); + Matrix actual_dfdx(5, 1); + Matrix actual_dfdy(5, 2); + double* jacobians[2] = {actual_dfdx.data(), actual_dfdy.data()}; + EXPECT_TRUE(problem_.EvaluateResidualBlock(residual_block_id, + kApplyLossFunction, + &actual_cost, + actual_f.data(), + jacobians)); + + EXPECT_NEAR(std::abs(expected_cost - actual_cost) / actual_cost, + 0, + std::numeric_limits<double>::epsilon()) + << actual_cost; + EXPECT_NEAR((expected_f - actual_f).norm() / actual_f.norm(), + 0, + std::numeric_limits<double>::epsilon()) + << actual_f; + EXPECT_NEAR((expected_dfdx - actual_dfdx).norm() / actual_dfdx.norm(), + 0, + std::numeric_limits<double>::epsilon()) + << actual_dfdx; + EXPECT_NEAR((expected_dfdy - actual_dfdy).norm() / actual_dfdy.norm(), + 0, + 
std::numeric_limits<double>::epsilon()) + << actual_dfdy; +} + +TEST_F(ProblemEvaluateResidualBlockTest, + OneResidualBlockWithOneConstantParameterBlock) { + ResidualBlockId residual_block_id = + problem_.AddResidualBlock(IdentityFunctor::Create(), nullptr, x_, y_); + problem_.SetParameterBlockConstant(x_); + + Vector expected_f(5); + expected_f << 1, 2, 1, 2, 3; + Matrix expected_dfdy = Matrix::Zero(5, 3); + expected_dfdy.block(2, 0, 3, 3) = Matrix::Identity(3, 3); + double expected_cost = expected_f.squaredNorm() / 2.0; + + double actual_cost; + Vector actual_f(5); + Matrix actual_dfdx(5, 2); + Matrix actual_dfdy(5, 3); + + // Try evaluating both Jacobians, this should fail. + double* jacobians[2] = {actual_dfdx.data(), actual_dfdy.data()}; + EXPECT_FALSE(problem_.EvaluateResidualBlock(residual_block_id, + kApplyLossFunction, + &actual_cost, + actual_f.data(), + jacobians)); + + jacobians[0] = nullptr; + EXPECT_TRUE(problem_.EvaluateResidualBlock(residual_block_id, + kApplyLossFunction, + &actual_cost, + actual_f.data(), + jacobians)); + + EXPECT_NEAR(std::abs(expected_cost - actual_cost) / actual_cost, + 0, + std::numeric_limits<double>::epsilon()) + << actual_cost; + EXPECT_NEAR((expected_f - actual_f).norm() / actual_f.norm(), + 0, + std::numeric_limits<double>::epsilon()) + << actual_f; + EXPECT_NEAR((expected_dfdy - actual_dfdy).norm() / actual_dfdy.norm(), + 0, + std::numeric_limits<double>::epsilon()) + << actual_dfdy; +} + +TEST_F(ProblemEvaluateResidualBlockTest, + OneResidualBlockWithAllConstantParameterBlocks) { + ResidualBlockId residual_block_id = + problem_.AddResidualBlock(IdentityFunctor::Create(), nullptr, x_, y_); + problem_.SetParameterBlockConstant(x_); + problem_.SetParameterBlockConstant(y_); + + Vector expected_f(5); + expected_f << 1, 2, 1, 2, 3; + double expected_cost = expected_f.squaredNorm() / 2.0; + + double actual_cost; + Vector actual_f(5); + Matrix actual_dfdx(5, 2); + Matrix actual_dfdy(5, 3); + + // Try evaluating with one or 
more Jacobians, this should fail. + double* jacobians[2] = {actual_dfdx.data(), actual_dfdy.data()}; + EXPECT_FALSE(problem_.EvaluateResidualBlock(residual_block_id, + kApplyLossFunction, + &actual_cost, + actual_f.data(), + jacobians)); + + jacobians[0] = nullptr; + EXPECT_FALSE(problem_.EvaluateResidualBlock(residual_block_id, + kApplyLossFunction, + &actual_cost, + actual_f.data(), + jacobians)); + jacobians[1] = nullptr; + EXPECT_TRUE(problem_.EvaluateResidualBlock(residual_block_id, + kApplyLossFunction, + &actual_cost, + actual_f.data(), + jacobians)); + + EXPECT_NEAR(std::abs(expected_cost - actual_cost) / actual_cost, + 0, + std::numeric_limits<double>::epsilon()) + << actual_cost; + EXPECT_NEAR((expected_f - actual_f).norm() / actual_f.norm(), + 0, + std::numeric_limits<double>::epsilon()) + << actual_f; +} + +TEST_F(ProblemEvaluateResidualBlockTest, + OneResidualBlockWithOneParameterBlockConstantAndParameterBlockChanged) { + ResidualBlockId residual_block_id = + problem_.AddResidualBlock(IdentityFunctor::Create(), nullptr, x_, y_); + problem_.SetParameterBlockConstant(x_); + + x_[0] = 2; + y_[2] = 1; + Vector expected_f(5); + expected_f << 2, 2, 1, 2, 1; + Matrix expected_dfdy = Matrix::Zero(5, 3); + expected_dfdy.block(2, 0, 3, 3) = Matrix::Identity(3, 3); + double expected_cost = expected_f.squaredNorm() / 2.0; + + double actual_cost; + Vector actual_f(5); + Matrix actual_dfdx(5, 2); + Matrix actual_dfdy(5, 3); + + // Try evaluating with one or more Jacobians, this should fail. 
+ double* jacobians[2] = {actual_dfdx.data(), actual_dfdy.data()}; + EXPECT_FALSE(problem_.EvaluateResidualBlock(residual_block_id, + kApplyLossFunction, + &actual_cost, + actual_f.data(), + jacobians)); + + jacobians[0] = nullptr; + EXPECT_TRUE(problem_.EvaluateResidualBlock(residual_block_id, + kApplyLossFunction, + &actual_cost, + actual_f.data(), + jacobians)); + EXPECT_NEAR(std::abs(expected_cost - actual_cost) / actual_cost, + 0, + std::numeric_limits<double>::epsilon()) + << actual_cost; + EXPECT_NEAR((expected_f - actual_f).norm() / actual_f.norm(), + 0, + std::numeric_limits<double>::epsilon()) + << actual_f; + EXPECT_NEAR((expected_dfdy - actual_dfdy).norm() / actual_dfdy.norm(), + 0, + std::numeric_limits<double>::epsilon()) + << actual_dfdy; +} + TEST(Problem, SetAndGetParameterLowerBound) { Problem problem; double x[] = {1.0, 2.0};
diff --git a/internal/ceres/residual_block.cc b/internal/ceres/residual_block.cc index 7bfcc29..0bf30bc 100644 --- a/internal/ceres/residual_block.cc +++ b/internal/ceres/residual_block.cc
@@ -80,11 +80,11 @@ // Put pointers into the scratch space into global_jacobians as appropriate. FixedArray<double*, 8> global_jacobians(num_parameter_blocks); - if (jacobians != NULL) { + if (jacobians != nullptr) { for (int i = 0; i < num_parameter_blocks; ++i) { const ParameterBlock* parameter_block = parameter_blocks_[i]; - if (jacobians[i] != NULL && - parameter_block->LocalParameterizationJacobian() != NULL) { + if (jacobians[i] != nullptr && + parameter_block->LocalParameterizationJacobian() != nullptr) { global_jacobians[i] = scratch; scratch += num_residuals * parameter_block->Size(); } else { @@ -94,7 +94,7 @@ } // If the caller didn't request residuals, use the scratch space for them. - bool outputting_residuals = (residuals != NULL); + bool outputting_residuals = (residuals != nullptr); if (!outputting_residuals) { residuals = scratch; } @@ -103,7 +103,7 @@ // the CostFunction::Evaluate call, to see if all the return values // that were required were written to and that they are finite. double** eval_jacobians = - (jacobians != NULL) ? global_jacobians.data() : NULL; + (jacobians != nullptr) ? global_jacobians.data() : nullptr; InvalidateEvaluation(*this, cost, residuals, eval_jacobians); @@ -134,13 +134,13 @@ double squared_norm = VectorRef(residuals, num_residuals).squaredNorm(); // Update the jacobians with the local parameterizations. - if (jacobians != NULL) { + if (jacobians != nullptr) { for (int i = 0; i < num_parameter_blocks; ++i) { - if (jacobians[i] != NULL) { + if (jacobians[i] != nullptr) { const ParameterBlock* parameter_block = parameter_blocks_[i]; // Apply local reparameterization to the jacobians. - if (parameter_block->LocalParameterizationJacobian() != NULL) { + if (parameter_block->LocalParameterizationJacobian() != nullptr) { // jacobians[i] = global_jacobians[i] * global_to_local_jacobian. 
MatrixMatrixMultiply<Dynamic, Dynamic, Dynamic, Dynamic, 0>( global_jacobians[i], @@ -155,7 +155,7 @@ } } - if (loss_function_ == NULL || !apply_loss_function) { + if (loss_function_ == nullptr || !apply_loss_function) { *cost = 0.5 * squared_norm; return true; } @@ -166,16 +166,16 @@ // No jacobians and not outputting residuals? All done. Doing an early exit // here avoids constructing the "Corrector" object below in a common case. - if (jacobians == NULL && !outputting_residuals) { + if (jacobians == nullptr && !outputting_residuals) { return true; } // Correct for the effects of the loss function. The jacobians need to be // corrected before the residuals, since they use the uncorrected residuals. Corrector correct(squared_norm, rho); - if (jacobians != NULL) { + if (jacobians != nullptr) { for (int i = 0; i < num_parameter_blocks; ++i) { - if (jacobians[i] != NULL) { + if (jacobians[i] != nullptr) { const ParameterBlock* parameter_block = parameter_blocks_[i]; // Correct the jacobians for the loss function. @@ -205,8 +205,7 @@ int scratch_doubles = 1; for (int i = 0; i < num_parameters; ++i) { const ParameterBlock* parameter_block = parameter_blocks_[i]; - if (!parameter_block->IsConstant() && - parameter_block->LocalParameterizationJacobian() != NULL) { + if (parameter_block->LocalParameterizationJacobian() != nullptr) { scratch_doubles += parameter_block->Size(); } }
diff --git a/internal/ceres/residual_block.h b/internal/ceres/residual_block.h index 6964c6d..a2e4425 100644 --- a/internal/ceres/residual_block.h +++ b/internal/ceres/residual_block.h
@@ -82,6 +82,8 @@ // computed. If jacobians[i] is NULL, then the jacobian for that parameter is // not computed. // + // cost must not be null. + // // Evaluate needs scratch space which must be supplied by the caller via // scratch. The array should have at least NumScratchDoublesForEvaluate() // space available.
diff --git a/internal/ceres/solver.cc b/internal/ceres/solver.cc index f8ad2c9..65ddf5b 100644 --- a/internal/ceres/solver.cc +++ b/internal/ceres/solver.cc
@@ -491,7 +491,7 @@ return; } - ProblemImpl* problem_impl = problem->problem_impl_.get(); + ProblemImpl* problem_impl = problem->impl_.get(); Program* program = problem_impl->mutable_program(); PreSolveSummarize(options, problem_impl, summary); @@ -566,7 +566,7 @@ } const double postprocessor_start_time = WallTimeInSeconds(); - problem_impl = problem->problem_impl_.get(); + problem_impl = problem->impl_.get(); program = problem_impl->mutable_program(); // On exit, ensure that the parameter blocks again point at the user // provided values and the parameter blocks are numbered according