clang-tidy cleanups
Also some clang-format cleanups.
Change-Id: Ifb728f12ec130a5af33db789086f8a9baa078678
diff --git a/internal/ceres/block_jacobi_preconditioner_test.cc b/internal/ceres/block_jacobi_preconditioner_test.cc
index 6e35387..db60c7b 100644
--- a/internal/ceres/block_jacobi_preconditioner_test.cc
+++ b/internal/ceres/block_jacobi_preconditioner_test.cc
@@ -31,6 +31,7 @@
#include "ceres/block_jacobi_preconditioner.h"
#include <memory>
+#include <random>
#include <vector>
#include "Eigen/Dense"
diff --git a/internal/ceres/cgnr_solver.cc b/internal/ceres/cgnr_solver.cc
index 3452e64..64e018d 100644
--- a/internal/ceres/cgnr_solver.cc
+++ b/internal/ceres/cgnr_solver.cc
@@ -43,7 +43,6 @@
#include "ceres/wall_time.h"
#include "glog/logging.h"
-
namespace ceres::internal {
// A linear operator which takes a matrix A and a diagonal vector D and
@@ -120,7 +119,7 @@
}
}
-CgnrSolver::~CgnrSolver() {
+CgnrSolver::~CgnrSolver() {
for (int i = 0; i < 4; ++i) {
if (scratch_[i]) {
delete scratch_[i];
@@ -179,7 +178,6 @@
}
event_logger.AddEvent("Setup");
-
LinearOperatorAdapter preconditioner(*preconditioner_);
auto summary = ConjugateGradientsSolver(
cg_options, lhs, rhs, preconditioner, scratch_, cg_solution_);
@@ -198,12 +196,13 @@
// This is used to implement iterative general sparse linear solving with
// conjugate gradients, where A is the Jacobian and D is a regularizing
// parameter. A brief proof is included in cgnr_linear_operator.h.
-class CERES_NO_EXPORT CudaCgnrLinearOperator final : public
- ConjugateGradientsLinearOperator<CudaVector> {
+class CERES_NO_EXPORT CudaCgnrLinearOperator final
+ : public ConjugateGradientsLinearOperator<CudaVector> {
public:
CudaCgnrLinearOperator(CudaSparseMatrix& A,
const CudaVector& D,
- CudaVector* z) : A_(A), D_(D), z_(z) {}
+ CudaVector* z)
+ : A_(A), D_(D), z_(z) {}
void RightMultiplyAndAccumulate(const CudaVector& x, CudaVector& y) final {
// z = Ax
@@ -224,16 +223,16 @@
CudaVector* z_ = nullptr;
};
-class CERES_NO_EXPORT CudaIdentityPreconditioner final : public
- ConjugateGradientsLinearOperator<CudaVector> {
+class CERES_NO_EXPORT CudaIdentityPreconditioner final
+ : public ConjugateGradientsLinearOperator<CudaVector> {
public:
void RightMultiplyAndAccumulate(const CudaVector& x, CudaVector& y) final {
y.Axpby(1.0, x, 1.0);
}
};
-CudaCgnrSolver::CudaCgnrSolver(LinearSolver::Options options) :
- options_(std::move(options)) {}
+CudaCgnrSolver::CudaCgnrSolver(LinearSolver::Options options)
+ : options_(std::move(options)) {}
CudaCgnrSolver::~CudaCgnrSolver() {
for (int i = 0; i < 4; ++i) {
@@ -245,11 +244,13 @@
}
std::unique_ptr<CudaCgnrSolver> CudaCgnrSolver::Create(
- LinearSolver::Options options, std::string* error) {
+ LinearSolver::Options options, std::string* error) {
CHECK(error != nullptr);
if (options.preconditioner_type != IDENTITY) {
- *error = "CudaCgnrSolver does not support preconditioner type " +
- std::string(PreconditionerTypeToString(options.preconditioner_type)) + ". ";
+ *error =
+ "CudaCgnrSolver does not support preconditioner type " +
+ std::string(PreconditionerTypeToString(options.preconditioner_type)) +
+ ". ";
return nullptr;
}
CHECK(options.context->IsCUDAInitialized())
@@ -258,8 +259,9 @@
return solver;
}
-void CudaCgnrSolver::CpuToGpuTransfer(
- const CompressedRowSparseMatrix& A, const double* b, const double* D) {
+void CudaCgnrSolver::CpuToGpuTransfer(const CompressedRowSparseMatrix& A,
+ const double* b,
+ const double* D) {
if (A_ == nullptr) {
// Assume structure is not cached, do an initialization and structural copy.
A_ = std::make_unique<CudaSparseMatrix>(options_.context, A);
diff --git a/internal/ceres/cgnr_solver.h b/internal/ceres/cgnr_solver.h
index a971c81..9078d01 100644
--- a/internal/ceres/cgnr_solver.h
+++ b/internal/ceres/cgnr_solver.h
@@ -74,11 +74,12 @@
// A Cuda-accelerated version of CgnrSolver.
// This solver assumes that the sparsity structure of A remains constant for its
// lifetime.
-class CERES_NO_EXPORT CudaCgnrSolver final : public CompressedRowSparseMatrixSolver {
+class CERES_NO_EXPORT CudaCgnrSolver final
+ : public CompressedRowSparseMatrixSolver {
public:
explicit CudaCgnrSolver(LinearSolver::Options options);
- static std::unique_ptr<CudaCgnrSolver> Create(
- LinearSolver::Options options, std::string* error);
+ static std::unique_ptr<CudaCgnrSolver> Create(LinearSolver::Options options,
+ std::string* error);
~CudaCgnrSolver() override;
Summary SolveImpl(CompressedRowSparseMatrix* A,
@@ -87,8 +88,9 @@
double* x) final;
private:
- void CpuToGpuTransfer(
- const CompressedRowSparseMatrix& A, const double* b, const double* D);
+ void CpuToGpuTransfer(const CompressedRowSparseMatrix& A,
+ const double* b,
+ const double* D);
LinearSolver::Options options_;
std::unique_ptr<CudaSparseMatrix> A_;
diff --git a/internal/ceres/conjugate_gradients_solver_test.cc b/internal/ceres/conjugate_gradients_solver_test.cc
index e59eba7..a1f3c88 100644
--- a/internal/ceres/conjugate_gradients_solver_test.cc
+++ b/internal/ceres/conjugate_gradients_solver_test.cc
@@ -74,8 +74,8 @@
IdentityPreconditioner identity(A->num_cols());
LinearOperatorAdapter lhs(*A);
LinearOperatorAdapter preconditioner(identity);
- Vector* scratch_array[4] =
- {&scratch[0], &scratch[1], &scratch[2], &scratch[3]};
+ Vector* scratch_array[4] = {
+ &scratch[0], &scratch[1], &scratch[2], &scratch[3]};
auto summary = ConjugateGradientsSolver(
cg_options, lhs, b, preconditioner, scratch_array, x);
@@ -136,8 +136,8 @@
for (int i = 0; i < 4; ++i) {
scratch[i] = Vector::Zero(A->num_cols());
}
- Vector* scratch_array[4] =
- {&scratch[0], &scratch[1], &scratch[2], &scratch[3]};
+ Vector* scratch_array[4] = {
+ &scratch[0], &scratch[1], &scratch[2], &scratch[3]};
IdentityPreconditioner identity(A->num_cols());
LinearOperatorAdapter lhs(*A);
LinearOperatorAdapter preconditioner(identity);
diff --git a/internal/ceres/evaluator.cc b/internal/ceres/evaluator.cc
index 91cf221..fe1e12e 100644
--- a/internal/ceres/evaluator.cc
+++ b/internal/ceres/evaluator.cc
@@ -66,9 +66,9 @@
case ITERATIVE_SCHUR:
case CGNR: {
if (options.sparse_linear_algebra_library_type == CUDA_SPARSE) {
- return std::make_unique<
- ProgramEvaluator<ScratchEvaluatePreparer,
- CompressedRowJacobianWriter>>(options, program);
+ return std::make_unique<ProgramEvaluator<ScratchEvaluatePreparer,
+ CompressedRowJacobianWriter>>(
+ options, program);
} else {
return std::make_unique<
ProgramEvaluator<BlockEvaluatePreparer, BlockJacobianWriter>>(
diff --git a/internal/ceres/evaluator.h b/internal/ceres/evaluator.h
index 7ea523c..61e76f8 100644
--- a/internal/ceres/evaluator.h
+++ b/internal/ceres/evaluator.h
@@ -65,7 +65,8 @@
int num_threads = 1;
int num_eliminate_blocks = -1;
LinearSolverType linear_solver_type = DENSE_QR;
- SparseLinearAlgebraLibraryType sparse_linear_algebra_library_type = NO_SPARSE;
+ SparseLinearAlgebraLibraryType sparse_linear_algebra_library_type =
+ NO_SPARSE;
bool dynamic_sparsity = false;
ContextImpl* context = nullptr;
EvaluationCallback* evaluation_callback = nullptr;
diff --git a/internal/ceres/iterative_schur_complement_solver.cc b/internal/ceres/iterative_schur_complement_solver.cc
index 0463afb..db4ad2b 100644
--- a/internal/ceres/iterative_schur_complement_solver.cc
+++ b/internal/ceres/iterative_schur_complement_solver.cc
@@ -129,9 +129,7 @@
for (int i = 0; i < 4; ++i) {
scratch[i] = Vector::Zero(schur_complement_->num_cols());
}
- Vector* scratch_ptr[4] = {
- &scratch[0], &scratch[1], &scratch[2], &scratch[3]
- };
+ Vector* scratch_ptr[4] = {&scratch[0], &scratch[1], &scratch[2], &scratch[3]};
event_logger.AddEvent("Setup");
diff --git a/internal/ceres/normal_prior_test.cc b/internal/ceres/normal_prior_test.cc
index 881038f..ec1e064 100644
--- a/internal/ceres/normal_prior_test.cc
+++ b/internal/ceres/normal_prior_test.cc
@@ -30,6 +30,7 @@
#include "ceres/normal_prior.h"
+#include <algorithm>
#include <cstddef>
#include <random>
diff --git a/internal/ceres/schur_eliminator_benchmark.cc b/internal/ceres/schur_eliminator_benchmark.cc
index c6425ce..1c96937 100644
--- a/internal/ceres/schur_eliminator_benchmark.cc
+++ b/internal/ceres/schur_eliminator_benchmark.cc
@@ -28,6 +28,7 @@
//
// Authors: sameeragarwal@google.com (Sameer Agarwal)
+#include <algorithm>
#include <memory>
#include <random>
@@ -92,7 +93,7 @@
matrix_ = std::make_unique<BlockSparseMatrix>(bs);
double* values = matrix_->mutable_values();
std::generate_n(values, matrix_->num_nonzeros(), [this] {
- return standard_normal(prng);
+ return standard_normal(prng_);
});
b_.resize(matrix_->num_rows());
@@ -126,7 +127,7 @@
Vector diagonal_;
Vector z_;
Vector y_;
- std::mt19937 prng;
+ std::mt19937 prng_;
std::normal_distribution<> standard_normal;
};
diff --git a/internal/ceres/schur_eliminator_test.cc b/internal/ceres/schur_eliminator_test.cc
index 17ce67d..c5ba328 100644
--- a/internal/ceres/schur_eliminator_test.cc
+++ b/internal/ceres/schur_eliminator_test.cc
@@ -30,6 +30,7 @@
#include "ceres/schur_eliminator.h"
+#include <algorithm>
#include <memory>
#include <random>
diff --git a/internal/ceres/solver.cc b/internal/ceres/solver.cc
index 84487bb..ea18913 100644
--- a/internal/ceres/solver.cc
+++ b/internal/ceres/solver.cc
@@ -669,6 +669,7 @@
return internal::StringPrintf("%s,%s,%s", row.c_str(), e.c_str(), f.c_str());
}
+#ifndef CERES_NO_CUDA
bool IsCudaRequired(const Solver::Options& options) {
if (options.linear_solver_type == DENSE_NORMAL_CHOLESKY ||
options.linear_solver_type == DENSE_SCHUR ||
@@ -680,6 +681,7 @@
}
return false;
}
+#endif
} // namespace
diff --git a/internal/ceres/solver_test.cc b/internal/ceres/solver_test.cc
index 92c1ddc..b796873 100644
--- a/internal/ceres/solver_test.cc
+++ b/internal/ceres/solver_test.cc
@@ -883,7 +883,7 @@
options.dynamic_sparsity = false;
options.use_mixed_precision_solves = false;
EXPECT_EQ(options.IsValid(&message),
- IsSparseLinearAlgebraLibraryTypeAvailable(CUDA_SPARSE));
+ IsSparseLinearAlgebraLibraryTypeAvailable(CUDA_SPARSE));
options.dynamic_sparsity = true;
options.use_mixed_precision_solves = false;
diff --git a/internal/ceres/sparse_linear_operator_benchmark.cc b/internal/ceres/sparse_linear_operator_benchmark.cc
index f724a01..a08911e 100644
--- a/internal/ceres/sparse_linear_operator_benchmark.cc
+++ b/internal/ceres/sparse_linear_operator_benchmark.cc
@@ -100,8 +100,7 @@
values_offset += m_d * kResidualSize;
}
}
- std::unique_ptr<BlockSparseMatrix> jacobian =
- std::make_unique<BlockSparseMatrix>(bs);
+ auto jacobian = std::make_unique<BlockSparseMatrix>(bs);
VectorRef(jacobian->mutable_values(), jacobian->num_nonzeros()).setRandom();
return jacobian;
}
diff --git a/internal/ceres/subset_preconditioner_test.cc b/internal/ceres/subset_preconditioner_test.cc
index 5fb7f26..23b7a31 100644
--- a/internal/ceres/subset_preconditioner_test.cc
+++ b/internal/ceres/subset_preconditioner_test.cc
@@ -101,13 +101,13 @@
options.max_row_block_size = 4;
options.block_density = 0.9;
- m_ = BlockSparseMatrix::CreateRandomMatrix(options, prng);
+ m_ = BlockSparseMatrix::CreateRandomMatrix(options, prng_);
start_row_block_ = m_->block_structure()->rows.size();
// Ensure that the bottom part of the matrix has the same column
// block structure.
options.col_blocks = m_->block_structure()->cols;
- b_ = BlockSparseMatrix::CreateRandomMatrix(options, prng);
+ b_ = BlockSparseMatrix::CreateRandomMatrix(options, prng_);
m_->AppendRows(*b_);
// Create a Identity block diagonal matrix with the same column
@@ -133,7 +133,7 @@
std::unique_ptr<Preconditioner> preconditioner_;
Vector diagonal_;
int start_row_block_;
- std::mt19937 prng;
+ std::mt19937 prng_;
};
TEST_P(SubsetPreconditionerTest, foo) {