Fix a batch of clang-tidy findings: modernize-use-nullptr (NULL -> nullptr), add missing 'final' on overridden Minus(), rename global_J_local to global_to_local per identifier-naming, use empty() instead of size() > 0, make parameter names consistent between declarations and definitions, and add a missing <string> include.
Change-Id: Ic7ce800ba33569c141d45ab6d27657987f303b93
diff --git a/internal/ceres/covariance_impl.cc b/internal/ceres/covariance_impl.cc
index 2180845..ee4afd9 100644
--- a/internal/ceres/covariance_impl.cc
+++ b/internal/ceres/covariance_impl.cc
@@ -117,7 +117,7 @@
covariance_blocks);
problem_ = problem;
parameter_block_to_row_index_.clear();
- covariance_matrix_.reset(NULL);
+ covariance_matrix_.reset(nullptr);
is_valid_ = (ComputeCovarianceSparsity(covariance_blocks, problem) &&
ComputeCovarianceValues());
is_computed_ = true;
@@ -222,7 +222,7 @@
// Fast path when there are no manifolds or if the user does not want it
// lifted to the ambient space.
- if ((manifold1 == NULL && manifold2 == NULL) ||
+ if ((manifold1 == nullptr && manifold2 == nullptr) ||
!lift_covariance_to_ambient_space) {
if (transpose) {
MatrixRef(covariance_block, block2_tangent_size, block1_tangent_size) =
@@ -252,7 +252,7 @@
// TODO(sameeragarwal): Add caching the manifold plus_jacobian, so that they
// are computed just once per parameter block.
Matrix block1_jacobian(block1_size, block1_tangent_size);
- if (manifold1 == NULL) {
+ if (manifold1 == nullptr) {
block1_jacobian.setIdentity();
} else {
manifold1->PlusJacobian(parameter_block1, block1_jacobian.data());
@@ -263,7 +263,7 @@
if (parameter_block1 == parameter_block2) {
block2_jacobian = block1_jacobian;
} else {
- if (manifold2 == NULL) {
+ if (manifold2 == nullptr) {
block2_jacobian.setIdentity();
} else {
manifold2->PlusJacobian(parameter_block2, block2_jacobian.data());
@@ -450,7 +450,7 @@
if (covariance_blocks.size() == 0) {
VLOG(2) << "No non-zero covariance blocks found";
- covariance_matrix_.reset(NULL);
+ covariance_matrix_.reset(nullptr);
return true;
}
@@ -558,13 +558,13 @@
"CovarianceImpl::ComputeCovarianceValuesUsingSparseQR");
#ifndef CERES_NO_SUITESPARSE
- if (covariance_matrix_.get() == NULL) {
+ if (covariance_matrix_.get() == nullptr) {
// Nothing to do, all zeros covariance matrix.
return true;
}
CRSMatrix jacobian;
- problem_->Evaluate(evaluate_options_, NULL, NULL, NULL, &jacobian);
+ problem_->Evaluate(evaluate_options_, nullptr, nullptr, nullptr, &jacobian);
event_logger.AddEvent("Evaluate");
// Construct a compressed column form of the Jacobian.
@@ -603,11 +603,11 @@
cholmod_jacobian.nrow = num_rows;
cholmod_jacobian.ncol = num_cols;
cholmod_jacobian.nzmax = num_nonzeros;
- cholmod_jacobian.nz = NULL;
+ cholmod_jacobian.nz = nullptr;
cholmod_jacobian.p = reinterpret_cast<void*>(&transpose_rows[0]);
cholmod_jacobian.i = reinterpret_cast<void*>(&transpose_cols[0]);
cholmod_jacobian.x = reinterpret_cast<void*>(&transpose_values[0]);
- cholmod_jacobian.z = NULL;
+ cholmod_jacobian.z = nullptr;
cholmod_jacobian.stype = 0; // Matrix is not symmetric.
cholmod_jacobian.itype = CHOLMOD_LONG;
cholmod_jacobian.xtype = CHOLMOD_REAL;
@@ -618,8 +618,8 @@
cholmod_common cc;
cholmod_l_start(&cc);
- cholmod_sparse* R = NULL;
- SuiteSparse_long* permutation = NULL;
+ cholmod_sparse* R = nullptr;
+ SuiteSparse_long* permutation = nullptr;
// Compute a Q-less QR factorization of the Jacobian. Since we are
// only interested in inverting J'J = R'R, we do not need Q. This
@@ -723,13 +723,13 @@
bool CovarianceImpl::ComputeCovarianceValuesUsingDenseSVD() {
EventLogger event_logger(
"CovarianceImpl::ComputeCovarianceValuesUsingDenseSVD");
- if (covariance_matrix_.get() == NULL) {
+ if (covariance_matrix_.get() == nullptr) {
// Nothing to do, all zeros covariance matrix.
return true;
}
CRSMatrix jacobian;
- problem_->Evaluate(evaluate_options_, NULL, NULL, NULL, &jacobian);
+ problem_->Evaluate(evaluate_options_, nullptr, nullptr, nullptr, &jacobian);
event_logger.AddEvent("Evaluate");
Matrix dense_jacobian(jacobian.num_rows, jacobian.num_cols);
@@ -814,13 +814,13 @@
bool CovarianceImpl::ComputeCovarianceValuesUsingEigenSparseQR() {
EventLogger event_logger(
"CovarianceImpl::ComputeCovarianceValuesUsingEigenSparseQR");
- if (covariance_matrix_.get() == NULL) {
+ if (covariance_matrix_.get() == nullptr) {
// Nothing to do, all zeros covariance matrix.
return true;
}
CRSMatrix jacobian;
- problem_->Evaluate(evaluate_options_, NULL, NULL, NULL, &jacobian);
+ problem_->Evaluate(evaluate_options_, nullptr, nullptr, nullptr, &jacobian);
event_logger.AddEvent("Evaluate");
typedef Eigen::SparseMatrix<double, Eigen::ColMajor> EigenSparseMatrix;
diff --git a/internal/ceres/covariance_test.cc b/internal/ceres/covariance_test.cc
index 1f3a75a..4675ada 100644
--- a/internal/ceres/covariance_test.cc
+++ b/internal/ceres/covariance_test.cc
@@ -412,7 +412,7 @@
return true;
}
- bool Minus(const double* y, const double* x, double* y_minus_x) const {
+ bool Minus(const double* y, const double* x, double* y_minus_x) const final {
LOG(FATAL) << "Should not be called";
return true;
}
diff --git a/internal/ceres/gradient_checker_test.cc b/internal/ceres/gradient_checker_test.cc
index c7d08cb..26262ef 100644
--- a/internal/ceres/gradient_checker_test.cc
+++ b/internal/ceres/gradient_checker_test.cc
@@ -361,19 +361,19 @@
double* x_plus_delta) const final {
VectorRef(x_plus_delta, GlobalSize()) =
ConstVectorRef(x, GlobalSize()) +
- (global_J_local * ConstVectorRef(delta, LocalSize()));
+ (global_to_local_ * ConstVectorRef(delta, LocalSize()));
return true;
}
bool ComputeJacobian(const double* /*x*/, double* jacobian) const final {
- MatrixRef(jacobian, GlobalSize(), LocalSize()) = global_J_local;
+ MatrixRef(jacobian, GlobalSize(), LocalSize()) = global_to_local_;
return true;
}
- int GlobalSize() const final { return global_J_local.rows(); }
- int LocalSize() const final { return global_J_local.cols(); }
+ int GlobalSize() const final { return global_to_local_.rows(); }
+ int LocalSize() const final { return global_to_local_.cols(); }
- Matrix global_J_local;
+ Matrix global_to_local_;
};
TEST(GradientChecker, TestCorrectnessWithLocalParameterizations) {
@@ -421,27 +421,27 @@
ExpectMatricesClose(residual, residual_expected, kTolerance);
// Create parameterization.
- Eigen::Matrix<double, 3, 2, Eigen::RowMajor> global_J_local;
- global_J_local.row(0) << 1.5, 2.5;
- global_J_local.row(1) << 3.5, 4.5;
- global_J_local.row(2) << 5.5, 6.5;
+ Eigen::Matrix<double, 3, 2, Eigen::RowMajor> global_to_local;
+ global_to_local.row(0) << 1.5, 2.5;
+ global_to_local.row(1) << 3.5, 4.5;
+ global_to_local.row(2) << 5.5, 6.5;
MatrixParameterization parameterization;
- parameterization.global_J_local = global_J_local;
+ parameterization.global_to_local_ = global_to_local;
// Test local parameterization for correctness.
Eigen::Vector3d x(7.0, 8.0, 9.0);
Eigen::Vector2d delta(10.0, 11.0);
- Eigen::Matrix<double, 3, 2, Eigen::RowMajor> global_J_local_out;
- parameterization.ComputeJacobian(x.data(), global_J_local_out.data());
- ExpectMatricesClose(global_J_local_out,
- global_J_local,
+ Eigen::Matrix<double, 3, 2, Eigen::RowMajor> global_to_local_out;
+ parameterization.ComputeJacobian(x.data(), global_to_local_out.data());
+ ExpectMatricesClose(global_to_local_out,
+ global_to_local,
std::numeric_limits<double>::epsilon());
Eigen::Vector3d x_plus_delta;
parameterization.Plus(x.data(), delta.data(), x_plus_delta.data());
- Eigen::Vector3d x_plus_delta_expected = x + (global_J_local * delta);
+ Eigen::Vector3d x_plus_delta_expected = x + (global_to_local * delta);
ExpectMatricesClose(x_plus_delta, x_plus_delta_expected, kTolerance);
// Now test GradientChecker.
@@ -475,12 +475,12 @@
results.residuals, residual, std::numeric_limits<double>::epsilon());
CheckDimensions(results, parameter_sizes, local_parameter_sizes, 3);
ExpectMatricesClose(
- results.local_jacobians.at(0), j0 * global_J_local, kTolerance);
+ results.local_jacobians.at(0), j0 * global_to_local, kTolerance);
ExpectMatricesClose(results.local_jacobians.at(1),
j1,
std::numeric_limits<double>::epsilon());
ExpectMatricesClose(
- results.local_numeric_jacobians.at(0), j0 * global_J_local, kTolerance);
+ results.local_numeric_jacobians.at(0), j0 * global_to_local, kTolerance);
ExpectMatricesClose(results.local_numeric_jacobians.at(1), j1, kTolerance);
ExpectMatricesClose(
results.jacobians.at(0), j0, std::numeric_limits<double>::epsilon());
@@ -523,13 +523,13 @@
ASSERT_EQ(results.local_jacobians.size(), 2);
ASSERT_EQ(results.local_numeric_jacobians.size(), 2);
ExpectMatricesClose(results.local_jacobians.at(0),
- (j0 + j0_offset) * global_J_local,
+ (j0 + j0_offset) * global_to_local,
kTolerance);
ExpectMatricesClose(results.local_jacobians.at(1),
j1,
std::numeric_limits<double>::epsilon());
ExpectMatricesClose(
- results.local_numeric_jacobians.at(0), j0 * global_J_local, kTolerance);
+ results.local_numeric_jacobians.at(0), j0 * global_to_local, kTolerance);
ExpectMatricesClose(results.local_numeric_jacobians.at(1), j1, kTolerance);
ExpectMatricesClose(results.jacobians.at(0), j0 + j0_offset, kTolerance);
ExpectMatricesClose(
@@ -548,7 +548,7 @@
// Now, zero out the local parameterization Jacobian with respect to the 3rd
// component of the 1st parameter. This makes the combination of cost function
// and local parameterization return correct values again.
- parameterization.global_J_local.row(2).setZero();
+ parameterization.global_to_local_.row(2).setZero();
// Verify that the gradient checker does not treat this as an error.
EXPECT_TRUE(gradient_checker.Probe(parameters.data(), kTolerance, &results))
@@ -562,13 +562,13 @@
ASSERT_EQ(results.local_jacobians.size(), 2);
ASSERT_EQ(results.local_numeric_jacobians.size(), 2);
ExpectMatricesClose(results.local_jacobians.at(0),
- (j0 + j0_offset) * parameterization.global_J_local,
+ (j0 + j0_offset) * parameterization.global_to_local_,
kTolerance);
ExpectMatricesClose(results.local_jacobians.at(1),
j1,
std::numeric_limits<double>::epsilon());
ExpectMatricesClose(results.local_numeric_jacobians.at(0),
- j0 * parameterization.global_J_local,
+ j0 * parameterization.global_to_local_,
kTolerance);
ExpectMatricesClose(results.local_numeric_jacobians.at(1), j1, kTolerance);
ExpectMatricesClose(results.jacobians.at(0), j0 + j0_offset, kTolerance);
@@ -596,16 +596,16 @@
double* x_plus_delta) const final {
VectorRef(x_plus_delta, AmbientSize()) =
ConstVectorRef(x, AmbientSize()) +
- (global_J_local * ConstVectorRef(delta, TangentSize()));
+ (global_to_local_ * ConstVectorRef(delta, TangentSize()));
return true;
}
bool PlusJacobian(const double* /*x*/, double* jacobian) const final {
- MatrixRef(jacobian, AmbientSize(), TangentSize()) = global_J_local;
+ MatrixRef(jacobian, AmbientSize(), TangentSize()) = global_to_local_;
return true;
}
- bool Minus(const double* y, const double* x, double* y_minus_x) const {
+ bool Minus(const double* y, const double* x, double* y_minus_x) const final {
LOG(FATAL) << "Should not be called";
return true;
}
@@ -615,10 +615,10 @@
return true;
}
- int AmbientSize() const final { return global_J_local.rows(); }
- int TangentSize() const final { return global_J_local.cols(); }
+ int AmbientSize() const final { return global_to_local_.rows(); }
+ int TangentSize() const final { return global_to_local_.cols(); }
- Matrix global_J_local;
+ Matrix global_to_local_;
};
TEST(GradientChecker, TestCorrectnessWithManifolds) {
@@ -666,27 +666,27 @@
ExpectMatricesClose(residual, residual_expected, kTolerance);
// Create manifold.
- Eigen::Matrix<double, 3, 2, Eigen::RowMajor> global_J_local;
- global_J_local.row(0) << 1.5, 2.5;
- global_J_local.row(1) << 3.5, 4.5;
- global_J_local.row(2) << 5.5, 6.5;
+ Eigen::Matrix<double, 3, 2, Eigen::RowMajor> global_to_local;
+ global_to_local.row(0) << 1.5, 2.5;
+ global_to_local.row(1) << 3.5, 4.5;
+ global_to_local.row(2) << 5.5, 6.5;
MatrixManifold manifold;
- manifold.global_J_local = global_J_local;
+ manifold.global_to_local_ = global_to_local;
// Test manifold for correctness.
Eigen::Vector3d x(7.0, 8.0, 9.0);
Eigen::Vector2d delta(10.0, 11.0);
- Eigen::Matrix<double, 3, 2, Eigen::RowMajor> global_J_local_out;
- manifold.PlusJacobian(x.data(), global_J_local_out.data());
- ExpectMatricesClose(global_J_local_out,
- global_J_local,
+ Eigen::Matrix<double, 3, 2, Eigen::RowMajor> global_to_local_out;
+ manifold.PlusJacobian(x.data(), global_to_local_out.data());
+ ExpectMatricesClose(global_to_local_out,
+ global_to_local,
std::numeric_limits<double>::epsilon());
Eigen::Vector3d x_plus_delta;
manifold.Plus(x.data(), delta.data(), x_plus_delta.data());
- Eigen::Vector3d x_plus_delta_expected = x + (global_J_local * delta);
+ Eigen::Vector3d x_plus_delta_expected = x + (global_to_local * delta);
ExpectMatricesClose(x_plus_delta, x_plus_delta_expected, kTolerance);
// Now test GradientChecker.
@@ -720,12 +720,12 @@
results.residuals, residual, std::numeric_limits<double>::epsilon());
CheckDimensions(results, parameter_sizes, tangent_sizes, 3);
ExpectMatricesClose(
- results.local_jacobians.at(0), j0 * global_J_local, kTolerance);
+ results.local_jacobians.at(0), j0 * global_to_local, kTolerance);
ExpectMatricesClose(results.local_jacobians.at(1),
j1,
std::numeric_limits<double>::epsilon());
ExpectMatricesClose(
- results.local_numeric_jacobians.at(0), j0 * global_J_local, kTolerance);
+ results.local_numeric_jacobians.at(0), j0 * global_to_local, kTolerance);
ExpectMatricesClose(results.local_numeric_jacobians.at(1), j1, kTolerance);
ExpectMatricesClose(
results.jacobians.at(0), j0, std::numeric_limits<double>::epsilon());
@@ -768,13 +768,13 @@
ASSERT_EQ(results.local_jacobians.size(), 2);
ASSERT_EQ(results.local_numeric_jacobians.size(), 2);
ExpectMatricesClose(results.local_jacobians.at(0),
- (j0 + j0_offset) * global_J_local,
+ (j0 + j0_offset) * global_to_local,
kTolerance);
ExpectMatricesClose(results.local_jacobians.at(1),
j1,
std::numeric_limits<double>::epsilon());
ExpectMatricesClose(
- results.local_numeric_jacobians.at(0), j0 * global_J_local, kTolerance);
+ results.local_numeric_jacobians.at(0), j0 * global_to_local, kTolerance);
ExpectMatricesClose(results.local_numeric_jacobians.at(1), j1, kTolerance);
ExpectMatricesClose(results.jacobians.at(0), j0 + j0_offset, kTolerance);
ExpectMatricesClose(
@@ -793,7 +793,7 @@
// Now, zero out the manifold Jacobian with respect to the 3rd component of
// the 1st parameter. This makes the combination of cost function and manifold
// return correct values again.
- manifold.global_J_local.row(2).setZero();
+ manifold.global_to_local_.row(2).setZero();
// Verify that the gradient checker does not treat this as an error.
EXPECT_TRUE(gradient_checker.Probe(parameters.data(), kTolerance, &results))
@@ -807,13 +807,13 @@
ASSERT_EQ(results.local_jacobians.size(), 2);
ASSERT_EQ(results.local_numeric_jacobians.size(), 2);
ExpectMatricesClose(results.local_jacobians.at(0),
- (j0 + j0_offset) * manifold.global_J_local,
+ (j0 + j0_offset) * manifold.global_to_local_,
kTolerance);
ExpectMatricesClose(results.local_jacobians.at(1),
j1,
std::numeric_limits<double>::epsilon());
ExpectMatricesClose(results.local_numeric_jacobians.at(0),
- j0 * manifold.global_J_local,
+ j0 * manifold.global_to_local_,
kTolerance);
ExpectMatricesClose(results.local_numeric_jacobians.at(1), j1, kTolerance);
ExpectMatricesClose(results.jacobians.at(0), j0 + j0_offset, kTolerance);
diff --git a/internal/ceres/gradient_checking_cost_function_test.cc b/internal/ceres/gradient_checking_cost_function_test.cc
index 8c47c35..d5b696a 100644
--- a/internal/ceres/gradient_checking_cost_function_test.cc
+++ b/internal/ceres/gradient_checking_cost_function_test.cc
@@ -166,7 +166,7 @@
GradientCheckingIterationCallback callback;
std::unique_ptr<CostFunction> gradient_checking_cost_function(
CreateGradientCheckingCostFunction(&term,
- NULL,
+ nullptr,
kRelativeStepSize,
kRelativePrecision,
"Ignored.",
@@ -222,7 +222,7 @@
GradientCheckingIterationCallback callback;
std::unique_ptr<CostFunction> gradient_checking_cost_function(
CreateGradientCheckingCostFunction(&term,
- NULL,
+ nullptr,
kRelativeStepSize,
kRelativePrecision,
"Fuzzy banana",
@@ -242,7 +242,7 @@
GradientCheckingIterationCallback callback;
std::unique_ptr<CostFunction> gradient_checking_cost_function(
CreateGradientCheckingCostFunction(&term,
- NULL,
+ nullptr,
kRelativeStepSize,
kRelativePrecision,
"Fuzzy banana",
@@ -354,15 +354,15 @@
problem_impl.AddParameterBlock(w, 4, new QuaternionParameterization);
// clang-format off
problem_impl.AddResidualBlock(new UnaryCostFunction(2, 3),
- NULL, x);
+ nullptr, x);
problem_impl.AddResidualBlock(new BinaryCostFunction(6, 5, 4),
- NULL, z, y);
+ nullptr, z, y);
problem_impl.AddResidualBlock(new BinaryCostFunction(3, 3, 5),
new TrivialLoss, x, z);
problem_impl.AddResidualBlock(new BinaryCostFunction(7, 5, 3),
- NULL, z, x);
+ nullptr, z, x);
problem_impl.AddResidualBlock(new TernaryCostFunction(1, 5, 3, 4),
- NULL, z, x, y);
+ nullptr, z, x, y);
// clang-format on
GradientCheckingIterationCallback callback;
@@ -432,15 +432,15 @@
problem_impl.AddParameterBlock(w, 4, new Quaternion);
// clang-format off
problem_impl.AddResidualBlock(new UnaryCostFunction(2, 3),
- NULL, x);
+ nullptr, x);
problem_impl.AddResidualBlock(new BinaryCostFunction(6, 5, 4),
- NULL, z, y);
+ nullptr, z, y);
problem_impl.AddResidualBlock(new BinaryCostFunction(3, 3, 5),
new TrivialLoss, x, z);
problem_impl.AddResidualBlock(new BinaryCostFunction(7, 5, 3),
- NULL, z, x);
+ nullptr, z, x);
problem_impl.AddResidualBlock(new TernaryCostFunction(1, 5, 3, 4),
- NULL, z, x, y);
+ nullptr, z, x, y);
// clang-format on
GradientCheckingIterationCallback callback;
@@ -500,7 +500,7 @@
double x[] = {1.0, 2.0, 3.0};
ProblemImpl problem_impl;
problem_impl.AddParameterBlock(x, 3);
- problem_impl.AddResidualBlock(new UnaryCostFunction(2, 3), NULL, x);
+ problem_impl.AddResidualBlock(new UnaryCostFunction(2, 3), nullptr, x);
problem_impl.SetParameterLowerBound(x, 0, 0.9);
problem_impl.SetParameterUpperBound(x, 1, 2.5);
diff --git a/internal/ceres/gradient_problem_test.cc b/internal/ceres/gradient_problem_test.cc
index 7a279a3..70a4557 100644
--- a/internal/ceres/gradient_problem_test.cc
+++ b/internal/ceres/gradient_problem_test.cc
@@ -37,7 +37,7 @@
class QuadraticTestFunction : public ceres::FirstOrderFunction {
public:
- explicit QuadraticTestFunction(bool* flag_to_set_on_destruction = NULL)
+ explicit QuadraticTestFunction(bool* flag_to_set_on_destruction = nullptr)
: flag_to_set_on_destruction_(flag_to_set_on_destruction) {}
virtual ~QuadraticTestFunction() {
@@ -51,7 +51,7 @@
double* gradient) const final {
const double x = parameters[0];
cost[0] = x * x;
- if (gradient != NULL) {
+ if (gradient != nullptr) {
gradient[0] = 2.0 * x;
}
return true;
@@ -73,7 +73,7 @@
ceres::GradientProblem problem(new QuadraticTestFunction());
double x = 7.0;
double cost = 0;
- problem.Evaluate(&x, &cost, NULL);
+ problem.Evaluate(&x, &cost, nullptr);
EXPECT_EQ(x * x, cost);
}
@@ -82,7 +82,7 @@
new IdentityParameterization(1));
double x = 7.0;
double cost = 0;
- problem.Evaluate(&x, &cost, NULL);
+ problem.Evaluate(&x, &cost, nullptr);
EXPECT_EQ(x * x, cost);
}
@@ -110,7 +110,7 @@
new EuclideanManifold(1));
double x = 7.0;
double cost = 0;
- problem.Evaluate(&x, &cost, NULL);
+ problem.Evaluate(&x, &cost, nullptr);
EXPECT_EQ(x * x, cost);
}
diff --git a/internal/ceres/problem.cc b/internal/ceres/problem.cc
index f4e9f87..703d29d 100644
--- a/internal/ceres/problem.cc
+++ b/internal/ceres/problem.cc
@@ -189,12 +189,12 @@
int Problem::NumResiduals() const { return impl_->NumResiduals(); }
-int Problem::ParameterBlockSize(const double* parameter_block) const {
- return impl_->ParameterBlockSize(parameter_block);
+int Problem::ParameterBlockSize(const double* values) const {
+ return impl_->ParameterBlockSize(values);
}
-int Problem::ParameterBlockLocalSize(const double* parameter_block) const {
- return impl_->ParameterBlockTangentSize(parameter_block);
+int Problem::ParameterBlockLocalSize(const double* values) const {
+ return impl_->ParameterBlockTangentSize(values);
}
int Problem::ParameterBlockTangentSize(const double* parameter_block) const {
diff --git a/internal/ceres/problem_impl.cc b/internal/ceres/problem_impl.cc
index 9e1a2c1..a49cd5e 100644
--- a/internal/ceres/problem_impl.cc
+++ b/internal/ceres/problem_impl.cc
@@ -888,8 +888,8 @@
return parameter_block->TangentSize();
}
-bool ProblemImpl::HasParameterBlock(const double* parameter_block) const {
- return (parameter_block_map_.find(const_cast<double*>(parameter_block)) !=
+bool ProblemImpl::HasParameterBlock(const double* values) const {
+ return (parameter_block_map_.find(const_cast<double*>(values)) !=
parameter_block_map_.end());
}
diff --git a/internal/ceres/problem_impl.h b/internal/ceres/problem_impl.h
index 2a3e1e4..9a0c4f8 100644
--- a/internal/ceres/problem_impl.h
+++ b/internal/ceres/problem_impl.h
@@ -143,10 +143,10 @@
int NumResidualBlocks() const;
int NumResiduals() const;
- int ParameterBlockSize(const double* parameter_block) const;
- int ParameterBlockTangentSize(const double* parameter_block) const;
+ int ParameterBlockSize(const double* values) const;
+ int ParameterBlockTangentSize(const double* values) const;
- bool HasParameterBlock(const double* parameter_block) const;
+ bool HasParameterBlock(const double* values) const;
void GetParameterBlocks(std::vector<double*>* parameter_blocks) const;
void GetResidualBlocks(std::vector<ResidualBlockId>* residual_blocks) const;
diff --git a/internal/ceres/problem_test.cc b/internal/ceres/problem_test.cc
index 3d00f30..aab43d1 100644
--- a/internal/ceres/problem_test.cc
+++ b/internal/ceres/problem_test.cc
@@ -1581,8 +1581,8 @@
expected_gradient,
expected_jacobian,
cost,
- residuals.size() > 0 ? &residuals[0] : nullptr,
- gradient.size() > 0 ? &gradient[0] : nullptr,
+ !residuals.empty() ? &residuals[0] : nullptr,
+ !gradient.empty() ? &gradient[0] : nullptr,
dense_jacobian.data());
}
diff --git a/internal/ceres/program.cc b/internal/ceres/program.cc
index 341564b..5fba205 100644
--- a/internal/ceres/program.cc
+++ b/internal/ceres/program.cc
@@ -33,6 +33,7 @@
#include <algorithm>
#include <map>
#include <memory>
+#include <string>
#include <vector>
#include "ceres/array_utils.h"