Commenting unused parameters for better readability
Change-Id: Idc285fa68ba787636a69a3ea3350e0282b9f8569
diff --git a/bazel/ceres.bzl b/bazel/ceres.bzl
index b7ab0bd..a1117aa 100644
--- a/bazel/ceres.bzl
+++ b/bazel/ceres.bzl
@@ -186,6 +186,7 @@
]),
copts = [
"-I" + internal,
+ "-Wunused-parameter",
"-Wno-sign-compare",
] + schur_eliminator_copts,
diff --git a/internal/ceres/callbacks.cc b/internal/ceres/callbacks.cc
index b46f62c..79f35e0 100644
--- a/internal/ceres/callbacks.cc
+++ b/internal/ceres/callbacks.cc
@@ -46,7 +46,7 @@
StateUpdatingCallback::~StateUpdatingCallback() = default;
CallbackReturnType StateUpdatingCallback::operator()(
- const IterationSummary& summary) {
+ const IterationSummary& /*summary*/) {
program_->StateVectorToParameterBlocks(parameters_);
program_->CopyParameterBlockStateToUserState();
return SOLVER_CONTINUE;
diff --git a/internal/ceres/coordinate_descent_minimizer.cc b/internal/ceres/coordinate_descent_minimizer.cc
index cd0caf2..e99075e 100644
--- a/internal/ceres/coordinate_descent_minimizer.cc
+++ b/internal/ceres/coordinate_descent_minimizer.cc
@@ -62,7 +62,7 @@
const Program& program,
const ProblemImpl::ParameterMap& parameter_map,
const ParameterBlockOrdering& ordering,
- std::string* error) {
+ std::string* /*error*/) {
parameter_blocks_.clear();
independent_set_offsets_.clear();
independent_set_offsets_.push_back(0);
@@ -118,7 +118,7 @@
void CoordinateDescentMinimizer::Minimize(const Minimizer::Options& options,
double* parameters,
- Solver::Summary* summary) {
+ Solver::Summary* /*summary*/) {
// Set the state and mark all parameter blocks constant.
for (auto* parameter_block : parameter_blocks_) {
parameter_block->SetState(parameters + parameter_block->state_offset());
diff --git a/internal/ceres/covariance_impl.cc b/internal/ceres/covariance_impl.cc
index 4245e43..d7a7c07 100644
--- a/internal/ceres/covariance_impl.cc
+++ b/internal/ceres/covariance_impl.cc
@@ -468,17 +468,12 @@
// Iterate over the covariance blocks contained in this row block
// and count the number of columns in this row block.
int num_col_blocks = 0;
-
- // TODO(sameeragarwal): num_columns is being computed but not
- // being used.
- int num_columns = 0;
for (int j = i; j < covariance_blocks.size(); ++j, ++num_col_blocks) {
const std::pair<const double*, const double*>& block_pair =
covariance_blocks[j];
if (block_pair.first != row_block) {
break;
}
- num_columns += problem->ParameterBlockTangentSize(block_pair.second);
}
// Fill out all the compressed rows for this parameter block.
diff --git a/internal/ceres/dogleg_strategy.cc b/internal/ceres/dogleg_strategy.cc
index 0db57de..adfec3c 100644
--- a/internal/ceres/dogleg_strategy.cc
+++ b/internal/ceres/dogleg_strategy.cc
@@ -632,7 +632,7 @@
reuse_ = false;
}
-void DoglegStrategy::StepRejected(double step_quality) {
+void DoglegStrategy::StepRejected(double /*step_quality*/) {
radius_ *= 0.5;
reuse_ = true;
}
diff --git a/internal/ceres/dynamic_sparse_normal_cholesky_solver.cc b/internal/ceres/dynamic_sparse_normal_cholesky_solver.cc
index 81cf933..ecbc47f 100644
--- a/internal/ceres/dynamic_sparse_normal_cholesky_solver.cc
+++ b/internal/ceres/dynamic_sparse_normal_cholesky_solver.cc
@@ -175,6 +175,8 @@
DynamicSparseNormalCholeskySolver::SolveImplUsingSuiteSparse(
CompressedRowSparseMatrix* A, double* rhs_and_solution) {
#ifdef CERES_NO_SUITESPARSE
+ (void) A;
+ (void) rhs_and_solution;
LinearSolver::Summary summary;
summary.num_iterations = 0;
diff --git a/internal/ceres/gradient_checking_cost_function.cc b/internal/ceres/gradient_checking_cost_function.cc
index 013a98a..ac013fe 100644
--- a/internal/ceres/gradient_checking_cost_function.cc
+++ b/internal/ceres/gradient_checking_cost_function.cc
@@ -131,7 +131,7 @@
: gradient_error_detected_(false) {}
CallbackReturnType GradientCheckingIterationCallback::operator()(
- const IterationSummary& summary) {
+ const IterationSummary& /*summary*/) {
if (gradient_error_detected_) {
LOG(ERROR) << "Gradient error detected. Terminating solver.";
return SOLVER_ABORT;
diff --git a/internal/ceres/gradient_problem_evaluator.h b/internal/ceres/gradient_problem_evaluator.h
index f1a61b1..c420b62 100644
--- a/internal/ceres/gradient_problem_evaluator.h
+++ b/internal/ceres/gradient_problem_evaluator.h
@@ -52,10 +52,10 @@
std::unique_ptr<SparseMatrix> CreateJacobian() const final { return nullptr; }
- bool Evaluate(const EvaluateOptions& evaluate_options,
+ bool Evaluate(const EvaluateOptions& /*evaluate_options*/,
const double* state,
double* cost,
- double* residuals,
+ double* /*residuals*/,
double* gradient,
SparseMatrix* jacobian) final {
CHECK(jacobian == nullptr);
diff --git a/internal/ceres/levenberg_marquardt_strategy.cc b/internal/ceres/levenberg_marquardt_strategy.cc
index 76e73b4..81ced2c 100644
--- a/internal/ceres/levenberg_marquardt_strategy.cc
+++ b/internal/ceres/levenberg_marquardt_strategy.cc
@@ -163,7 +163,7 @@
reuse_diagonal_ = false;
}
-void LevenbergMarquardtStrategy::StepRejected(double step_quality) {
+void LevenbergMarquardtStrategy::StepRejected(double /*step_quality*/) {
radius_ = radius_ / decrease_factor_;
decrease_factor_ *= 2.0;
reuse_diagonal_ = true;
diff --git a/internal/ceres/line_search_direction.cc b/internal/ceres/line_search_direction.cc
index f14292f..68c77f7 100644
--- a/internal/ceres/line_search_direction.cc
+++ b/internal/ceres/line_search_direction.cc
@@ -42,7 +42,7 @@
class CERES_NO_EXPORT SteepestDescent final : public LineSearchDirection {
public:
- bool NextDirection(const LineSearchMinimizer::State& previous,
+ bool NextDirection(const LineSearchMinimizer::State& /*previous*/,
const LineSearchMinimizer::State& current,
Vector* search_direction) override {
*search_direction = -current.gradient;
diff --git a/internal/ceres/line_search_minimizer.h b/internal/ceres/line_search_minimizer.h
index 68936a3..abbf4c5 100644
--- a/internal/ceres/line_search_minimizer.h
+++ b/internal/ceres/line_search_minimizer.h
@@ -46,7 +46,7 @@
class CERES_NO_EXPORT LineSearchMinimizer final : public Minimizer {
public:
struct State {
- State(int num_parameters, int num_effective_parameters)
+ State(int /*num_parameters*/, int num_effective_parameters)
: cost(0.0),
gradient(num_effective_parameters),
gradient_squared_norm(0.0),
diff --git a/internal/ceres/linear_least_squares_problems.cc b/internal/ceres/linear_least_squares_problems.cc
index 7b87c9a..0b818f5 100644
--- a/internal/ceres/linear_least_squares_problems.cc
+++ b/internal/ceres/linear_least_squares_problems.cc
@@ -930,7 +930,7 @@
const double* D,
const double* b,
const double* x,
- int num_eliminate_blocks) {
+ int /*num_eliminate_blocks*/) {
CHECK(A != nullptr);
Matrix AA;
A->ToDenseMatrix(&AA);
@@ -968,7 +968,7 @@
const double* D,
const double* b,
const double* x,
- int num_eliminate_blocks) {
+ int /*num_eliminate_blocks*/) {
CHECK(A != nullptr);
LOG(INFO) << "writing to: " << filename_base << "*";
diff --git a/internal/ceres/manifold.cc b/internal/ceres/manifold.cc
index a2e4c6d..c4895fd 100644
--- a/internal/ceres/manifold.cc
+++ b/internal/ceres/manifold.cc
@@ -196,7 +196,7 @@
return true;
}
-bool SubsetManifold::PlusJacobian(const double* x,
+bool SubsetManifold::PlusJacobian(const double* /*x*/,
double* plus_jacobian) const {
if (tangent_size_ == 0) {
return true;
@@ -213,7 +213,7 @@
return true;
}
-bool SubsetManifold::RightMultiplyByPlusJacobian(const double* x,
+bool SubsetManifold::RightMultiplyByPlusJacobian(const double* /*x*/,
const int num_rows,
const double* ambient_matrix,
double* tangent_matrix) const {
@@ -249,7 +249,7 @@
return true;
}
-bool SubsetManifold::MinusJacobian(const double* x,
+bool SubsetManifold::MinusJacobian(const double* /*x*/,
double* minus_jacobian) const {
const int ambient_size = AmbientSize();
MatrixRef m(minus_jacobian, tangent_size_, ambient_size);
diff --git a/internal/ceres/parallel_for.h b/internal/ceres/parallel_for.h
index 26a4a79..bf38603 100644
--- a/internal/ceres/parallel_for.h
+++ b/internal/ceres/parallel_for.h
@@ -152,7 +152,6 @@
int partition_start = start;
int cost_offset = cumulative_cost_offset;
- const CumulativeCostData* const range_end = cumulative_cost_data + end;
while (partition_start < end) {
// Already have max_num_partitions
if (partition->size() > max_num_partitions) {
diff --git a/internal/ceres/parallel_for_cxx.cc b/internal/ceres/parallel_for_cxx.cc
index 44b0be5..981f724 100644
--- a/internal/ceres/parallel_for_cxx.cc
+++ b/internal/ceres/parallel_for_cxx.cc
@@ -63,8 +63,7 @@
ThreadPoolState::ThreadPoolState(int start,
int end,
- int num_work_blocks,
- int num_workers)
+ int num_work_blocks)
: start(start),
end(end),
num_work_blocks(num_work_blocks),
diff --git a/internal/ceres/parallel_for_cxx.h b/internal/ceres/parallel_for_cxx.h
index c33ae08..fb87156 100644
--- a/internal/ceres/parallel_for_cxx.h
+++ b/internal/ceres/parallel_for_cxx.h
@@ -87,7 +87,7 @@
// Note that this splitting is optimal in the sense of maximal difference
// between block sizes, since splitting into equal blocks is possible
// if and only if number of indices is divisible by number of blocks.
- ThreadPoolState(int start, int end, int num_work_blocks, int num_workers);
+ ThreadPoolState(int start, int end, int num_work_blocks);
// The start and end index of the for loop.
const int start;
@@ -160,7 +160,7 @@
// the work before the tasks have been popped off the queue. So the
// shared state needs to exist for the duration of all the tasks.
std::shared_ptr<ThreadPoolState> shared_state(
- new ThreadPoolState(start, end, num_work_blocks, num_threads));
+ new ThreadPoolState(start, end, num_work_blocks));
// A function which tries to perform several chunks of work.
auto task = [shared_state, num_threads, &function]() {
diff --git a/internal/ceres/power_series_expansion_preconditioner.cc b/internal/ceres/power_series_expansion_preconditioner.cc
index 7a36d92..cdb7bf8 100644
--- a/internal/ceres/power_series_expansion_preconditioner.cc
+++ b/internal/ceres/power_series_expansion_preconditioner.cc
@@ -43,8 +43,8 @@
PowerSeriesExpansionPreconditioner::~PowerSeriesExpansionPreconditioner() =
default;
-bool PowerSeriesExpansionPreconditioner::Update(const LinearOperator& A,
- const double* D) {
+bool PowerSeriesExpansionPreconditioner::Update(const LinearOperator& /*A*/,
+ const double* /*D*/) {
return true;
}
diff --git a/internal/ceres/preconditioner.cc b/internal/ceres/preconditioner.cc
index 3e2bf41..895a406 100644
--- a/internal/ceres/preconditioner.cc
+++ b/internal/ceres/preconditioner.cc
@@ -55,8 +55,8 @@
SparseMatrixPreconditionerWrapper::~SparseMatrixPreconditionerWrapper() =
default;
-bool SparseMatrixPreconditionerWrapper::UpdateImpl(const SparseMatrix& A,
- const double* D) {
+bool SparseMatrixPreconditionerWrapper::UpdateImpl(const SparseMatrix& /*A*/,
+ const double* /*D*/) {
return true;
}
diff --git a/internal/ceres/preconditioner.h b/internal/ceres/preconditioner.h
index 654ef60..46fd1fa 100644
--- a/internal/ceres/preconditioner.h
+++ b/internal/ceres/preconditioner.h
@@ -147,7 +147,7 @@
public:
IdentityPreconditioner(int num_rows) : num_rows_(num_rows) {}
- bool Update(const LinearOperator& A, const double* D) final { return true; }
+ bool Update(const LinearOperator& /*A*/, const double* /*D*/) final { return true; }
void RightMultiplyAndAccumulate(const double* x, double* y) const final {
VectorRef(y, num_rows_) += ConstVectorRef(x, num_rows_);
diff --git a/internal/ceres/problem_impl.cc b/internal/ceres/problem_impl.cc
index 4440526..7a28424 100644
--- a/internal/ceres/problem_impl.cc
+++ b/internal/ceres/problem_impl.cc
@@ -359,7 +359,7 @@
InternalAddParameterBlock(values, size);
}
-void ProblemImpl::InternalSetManifold(double* values,
+void ProblemImpl::InternalSetManifold(double* /*values*/,
ParameterBlock* parameter_block,
Manifold* manifold) {
if (manifold != nullptr && options_.manifold_ownership == TAKE_OWNERSHIP) {
diff --git a/internal/ceres/program_evaluator.h b/internal/ceres/program_evaluator.h
index cc4da9f..7ccbbd1 100644
--- a/internal/ceres/program_evaluator.h
+++ b/internal/ceres/program_evaluator.h
@@ -105,7 +105,7 @@
namespace internal {
struct NullJacobianFinalizer {
- void operator()(SparseMatrix* jacobian, int num_parameters) {}
+ void operator()(SparseMatrix* /*jacobian*/, int /*num_parameters*/) {}
};
template <typename EvaluatePreparer,
diff --git a/internal/ceres/reorder_program.cc b/internal/ceres/reorder_program.cc
index 6c5318b..56b0a40 100644
--- a/internal/ceres/reorder_program.cc
+++ b/internal/ceres/reorder_program.cc
@@ -113,6 +113,12 @@
const ParameterBlockOrdering& parameter_block_ordering,
int* ordering) {
#ifdef CERES_NO_SUITESPARSE
+ // Cast unused parameters to void to suppress -Wunused-parameter warnings.
+ (void) linear_solver_ordering_type;
+ (void) tsm_block_jacobian_transpose;
+ (void) parameter_blocks;
+ (void) parameter_block_ordering;
+ (void) ordering;
LOG(FATAL) << "Congratulations, you found a Ceres bug! "
<< "Please report this error to the developers.";
#else
@@ -243,7 +249,7 @@
bool LexicographicallyOrderResidualBlocks(
const int size_of_first_elimination_group,
Program* program,
- std::string* error) {
+ std::string* /*error*/) {
CHECK_GE(size_of_first_elimination_group, 1)
<< "Congratulations, you found a Ceres bug! Please report this error "
<< "to the developers.";
@@ -324,8 +330,13 @@
// Pre-order the columns corresponding to the Schur complement if
// possible.
static void ReorderSchurComplementColumnsUsingSuiteSparse(
- const ParameterBlockOrdering& parameter_block_ordering, Program* program) {
-#ifndef CERES_NO_SUITESPARSE
+ const ParameterBlockOrdering& parameter_block_ordering,
+ Program* program) {
+#ifdef CERES_NO_SUITESPARSE
+ // Cast unused parameters to void to suppress -Wunused-parameter warnings.
+ (void) parameter_block_ordering;
+ (void) program;
+#else
SuiteSparse ss;
std::vector<int> constraints;
std::vector<ParameterBlock*>& parameter_blocks =
@@ -365,7 +376,7 @@
static void ReorderSchurComplementColumnsUsingEigen(
LinearSolverOrderingType ordering_type,
const int size_of_first_elimination_group,
- const ProblemImpl::ParameterMap& parameter_map,
+ const ProblemImpl::ParameterMap& /*parameter_map*/,
Program* program) {
#if defined(CERES_USE_EIGEN_SPARSE)
std::unique_ptr<TripletSparseMatrix> tsm_block_jacobian_transpose(
diff --git a/internal/ceres/residual_block.cc b/internal/ceres/residual_block.cc
index 93c045f..dff2b36 100644
--- a/internal/ceres/residual_block.cc
+++ b/internal/ceres/residual_block.cc
@@ -113,8 +113,7 @@
return false;
}
- if (!IsEvaluationValid(
- *this, parameters.data(), cost, residuals, eval_jacobians)) {
+ if (!IsEvaluationValid(*this, parameters.data(), residuals, eval_jacobians)) {
// clang-format off
std::string message =
"\n\n"
diff --git a/internal/ceres/residual_block_utils.cc b/internal/ceres/residual_block_utils.cc
index 74eaf58..4d03f94 100644
--- a/internal/ceres/residual_block_utils.cc
+++ b/internal/ceres/residual_block_utils.cc
@@ -114,9 +114,11 @@
return result;
}
+// TODO(sameeragarwal): Also check the validity of the cost value here.
+// The cost is part of the evaluation but is not checked, because according
+// to residual_block.cc it is not yet valid when this method is called.
bool IsEvaluationValid(const ResidualBlock& block,
- double const* const* parameters,
- double* cost,
+ double const* const* /*parameters*/,
double* residuals,
double** jacobians) {
const int num_parameter_blocks = block.NumParameterBlocks();
diff --git a/internal/ceres/residual_block_utils.h b/internal/ceres/residual_block_utils.h
index 38c75bf..f539056 100644
--- a/internal/ceres/residual_block_utils.h
+++ b/internal/ceres/residual_block_utils.h
@@ -63,7 +63,6 @@
CERES_NO_EXPORT
bool IsEvaluationValid(const ResidualBlock& block,
double const* const* parameters,
- double* cost,
double* residuals,
double** jacobians);
diff --git a/internal/ceres/schur_complement_solver.cc b/internal/ceres/schur_complement_solver.cc
index 1972dc0..d79cf46 100644
--- a/internal/ceres/schur_complement_solver.cc
+++ b/internal/ceres/schur_complement_solver.cc
@@ -179,7 +179,7 @@
// BlockRandomAccessDenseMatrix. The linear system is solved using
// Eigen's Cholesky factorization.
LinearSolver::Summary DenseSchurComplementSolver::SolveReducedLinearSystem(
- const LinearSolver::PerSolveOptions& per_solve_options, double* solution) {
+ const LinearSolver::PerSolveOptions& /*per_solve_options*/, double* solution) {
LinearSolver::Summary summary;
summary.num_iterations = 0;
summary.termination_type = LinearSolverTerminationType::SUCCESS;
diff --git a/internal/ceres/schur_eliminator.h b/internal/ceres/schur_eliminator.h
index 87cc5a8..56e4bff 100644
--- a/internal/ceres/schur_eliminator.h
+++ b/internal/ceres/schur_eliminator.h
@@ -382,8 +382,9 @@
class CERES_NO_EXPORT SchurEliminatorForOneFBlock final
: public SchurEliminatorBase {
public:
+ // TODO(sameeragarwal): Find out why "assume_full_rank_ete" is not used here.
void Init(int num_eliminate_blocks,
- bool assume_full_rank_ete,
+ bool /*assume_full_rank_ete*/,
const CompressedRowBlockStructure* bs) override {
CHECK_GT(num_eliminate_blocks, 0)
<< "SchurComplementSolver cannot be initialized with "
@@ -569,7 +570,7 @@
// y_i = e_t_e_inverse * sum_i e_i^T * (b_i - f_i * z);
void BackSubstitute(const BlockSparseMatrixData& A,
const double* b,
- const double* D,
+ const double* /*D*/,
const double* z_ptr,
double* y) override {
typename EigenTypes<kFBlockSize>::ConstVectorRef z(z_ptr, kFBlockSize);
diff --git a/internal/ceres/sparse_matrix.h b/internal/ceres/sparse_matrix.h
index 6cc421b..b5db4b7 100644
--- a/internal/ceres/sparse_matrix.h
+++ b/internal/ceres/sparse_matrix.h
@@ -90,7 +90,7 @@
// A = 0. A->num_nonzeros() == 0 is true after this call. The
// sparsity pattern is preserved.
virtual void SetZero() = 0;
- virtual void SetZero(ContextImpl* contex, int num_threads) { SetZero(); }
+ virtual void SetZero(ContextImpl* /*context*/, int /*num_threads*/) { SetZero(); }
// Resize and populate dense_matrix with a dense version of the
// sparse matrix.