ClangFormat and ClangTidy changes
Change-Id: Ib457dcc55ffb405aeaeac711c20bd9217b32f90e
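For context on the renames below (e.g. A_ -> a_ in block_sparse_matrix_test.cc): they follow the lower_case, trailing-underscore member naming that clang-tidy's readability-identifier-naming check enforces. A sketch of a .clang-tidy fragment that would drive such renames, assumed here for illustration rather than taken from the project:

    Checks: 'readability-identifier-naming'
    CheckOptions:
      - key: readability-identifier-naming.MemberCase
        value: lower_case
      - key: readability-identifier-naming.MemberSuffix
        value: '_'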
diff --git a/include/ceres/manifold_test_utils.h b/include/ceres/manifold_test_utils.h
index dd1f931..f8ed557 100644
--- a/include/ceres/manifold_test_utils.h
+++ b/include/ceres/manifold_test_utils.h
@@ -76,17 +76,20 @@
// EXPECT_THAT_MANIFOLD_INVARIANTS_HOLD(manifold, x, delta, y, kTolerance);
// }
#define EXPECT_THAT_MANIFOLD_INVARIANTS_HOLD(manifold, x, delta, y, tolerance) \
- ::ceres::Vector zero_tangent = ::ceres::Vector::Zero(manifold.TangentSize()); \
- EXPECT_THAT(manifold, ::ceres::XPlusZeroIsXAt(x, tolerance)); \
- EXPECT_THAT(manifold, ::ceres::XMinusXIsZeroAt(x, tolerance)); \
- EXPECT_THAT(manifold, ::ceres::MinusPlusIsIdentityAt(x, delta, tolerance)); \
- EXPECT_THAT(manifold, ::ceres::MinusPlusIsIdentityAt(x, zero_tangent, tolerance)); \
- EXPECT_THAT(manifold, ::ceres::PlusMinusIsIdentityAt(x, x, tolerance)); \
- EXPECT_THAT(manifold, ::ceres::PlusMinusIsIdentityAt(x, y, tolerance)); \
- EXPECT_THAT(manifold, ::ceres::HasCorrectPlusJacobianAt(x, tolerance)); \
- EXPECT_THAT(manifold, ::ceres::HasCorrectMinusJacobianAt(x, tolerance)); \
+ ::ceres::Vector zero_tangent = \
+ ::ceres::Vector::Zero(manifold.TangentSize()); \
+ EXPECT_THAT(manifold, ::ceres::XPlusZeroIsXAt(x, tolerance)); \
+ EXPECT_THAT(manifold, ::ceres::XMinusXIsZeroAt(x, tolerance)); \
+ EXPECT_THAT(manifold, ::ceres::MinusPlusIsIdentityAt(x, delta, tolerance)); \
+ EXPECT_THAT(manifold, \
+ ::ceres::MinusPlusIsIdentityAt(x, zero_tangent, tolerance)); \
+ EXPECT_THAT(manifold, ::ceres::PlusMinusIsIdentityAt(x, x, tolerance)); \
+ EXPECT_THAT(manifold, ::ceres::PlusMinusIsIdentityAt(x, y, tolerance)); \
+ EXPECT_THAT(manifold, ::ceres::HasCorrectPlusJacobianAt(x, tolerance)); \
+ EXPECT_THAT(manifold, ::ceres::HasCorrectMinusJacobianAt(x, tolerance)); \
EXPECT_THAT(manifold, ::ceres::MinusPlusJacobianIsIdentityAt(x, tolerance)); \
- EXPECT_THAT(manifold, ::ceres::HasCorrectRightMultiplyByPlusJacobianAt(x, tolerance));
+ EXPECT_THAT(manifold, \
+ ::ceres::HasCorrectRightMultiplyByPlusJacobianAt(x, tolerance));
// Checks that the invariant Plus(x, 0) == x holds.
MATCHER_P2(XPlusZeroIsXAt, x, tolerance, "") {
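For reference, the reflowed macro is used exactly as the header comment above it shows; a minimal sketch of such a test, assuming ceres::EuclideanManifold<3> and the gtest/gmock harness from manifold_test_utils.h (kTolerance is an arbitrary placeholder value):

    #include "ceres/manifold.h"
    #include "ceres/manifold_test_utils.h"
    #include "gtest/gtest.h"

    TEST(EuclideanManifold, InvariantsHold) {
      ceres::EuclideanManifold<3> manifold;
      const ceres::Vector x = ceres::Vector::Random(3);
      const ceres::Vector delta = ceres::Vector::Random(3);
      // Every point of R^3 lies on the Euclidean manifold, so a valid y
      // can be derived from x and delta via Plus.
      ceres::Vector y(3);
      manifold.Plus(x.data(), delta.data(), y.data());
      constexpr double kTolerance = 1e-9;
      EXPECT_THAT_MANIFOLD_INVARIANTS_HOLD(manifold, x, delta, y, kTolerance);
    }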
diff --git a/include/ceres/problem.h b/include/ceres/problem.h
index fac57c9..7b157d1 100644
--- a/include/ceres/problem.h
+++ b/include/ceres/problem.h
@@ -538,7 +538,7 @@
double** jacobians) const;
// Returns reference to the options with which the Problem was constructed.
- const Options & options() const;
+ const Options& options() const;
// Returns pointer to Problem implementation
internal::ProblemImpl* mutable_impl();
diff --git a/internal/ceres/block_jacobi_preconditioner.h b/internal/ceres/block_jacobi_preconditioner.h
index a39ad12..533b764 100644
--- a/internal/ceres/block_jacobi_preconditioner.h
+++ b/internal/ceres/block_jacobi_preconditioner.h
@@ -48,6 +48,10 @@
// This version of the preconditioner is for use with BlockSparseMatrix
// Jacobians.
+//
+// TODO(https://github.com/ceres-solver/ceres-solver/issues/936):
+// BlockSparseJacobiPreconditioner::RightMultiply will benefit from
+// multithreading
class CERES_NO_EXPORT BlockSparseJacobiPreconditioner
: public BlockSparseMatrixPreconditioner {
public:
diff --git a/internal/ceres/block_sparse_matrix.cc b/internal/ceres/block_sparse_matrix.cc
index c81ea39..f49f229 100644
--- a/internal/ceres/block_sparse_matrix.cc
+++ b/internal/ceres/block_sparse_matrix.cc
@@ -153,7 +153,8 @@
});
}
-// TODO: This method might benefit from caching column-block partition
+// TODO(https://github.com/ceres-solver/ceres-solver/issues/933): This method
+// might benefit from caching the column-block partition.
void BlockSparseMatrix::LeftMultiplyAndAccumulate(const double* x,
double* y,
ContextImpl* context,
@@ -244,7 +245,8 @@
}
}
-// TODO: This method might benefit from caching column-block partition
+// TODO(https://github.com/ceres-solver/ceres-solver/issues/933): This method
+// might benefit from caching the column-block partition.
void BlockSparseMatrix::SquaredColumnNorm(double* x,
ContextImpl* context,
int num_threads) const {
@@ -295,7 +297,8 @@
}
}
-// TODO: This method might benefit from caching column-block partition
+// TODO(https://github.com/ceres-solver/ceres-solver/issues/933): This method
+// might benefit from caching the column-block partition.
void BlockSparseMatrix::ScaleColumns(const double* scale,
ContextImpl* context,
int num_threads) {
diff --git a/internal/ceres/block_sparse_matrix_test.cc b/internal/ceres/block_sparse_matrix_test.cc
index 0529a2d..5036aa2 100644
--- a/internal/ceres/block_sparse_matrix_test.cc
+++ b/internal/ceres/block_sparse_matrix_test.cc
@@ -126,15 +126,15 @@
std::unique_ptr<LinearLeastSquaresProblem> problem =
CreateLinearLeastSquaresProblemFromId(2);
CHECK(problem != nullptr);
- A_.reset(down_cast<BlockSparseMatrix*>(problem->A.release()));
+ a_.reset(down_cast<BlockSparseMatrix*>(problem->A.release()));
problem = CreateLinearLeastSquaresProblemFromId(1);
CHECK(problem != nullptr);
- B_.reset(down_cast<TripletSparseMatrix*>(problem->A.release()));
+ b_.reset(down_cast<TripletSparseMatrix*>(problem->A.release()));
- CHECK_EQ(A_->num_rows(), B_->num_rows());
- CHECK_EQ(A_->num_cols(), B_->num_cols());
- CHECK_EQ(A_->num_nonzeros(), B_->num_nonzeros());
+ CHECK_EQ(a_->num_rows(), b_->num_rows());
+ CHECK_EQ(a_->num_cols(), b_->num_cols());
+ CHECK_EQ(a_->num_nonzeros(), b_->num_nonzeros());
context_.EnsureMinimumThreads(kNumThreads);
BlockSparseMatrix::RandomMatrixOptions options;
@@ -147,67 +147,67 @@
options.block_density = 0.05;
std::mt19937 rng;
- C_ = BlockSparseMatrix::CreateRandomMatrix(options, rng);
+ c_ = BlockSparseMatrix::CreateRandomMatrix(options, rng);
}
- std::unique_ptr<BlockSparseMatrix> A_;
- std::unique_ptr<TripletSparseMatrix> B_;
- std::unique_ptr<BlockSparseMatrix> C_;
+ std::unique_ptr<BlockSparseMatrix> a_;
+ std::unique_ptr<TripletSparseMatrix> b_;
+ std::unique_ptr<BlockSparseMatrix> c_;
ContextImpl context_;
};
TEST_F(BlockSparseMatrixTest, SetZeroTest) {
- A_->SetZero();
- EXPECT_EQ(13, A_->num_nonzeros());
+ a_->SetZero();
+ EXPECT_EQ(13, a_->num_nonzeros());
}
TEST_F(BlockSparseMatrixTest, RightMultiplyAndAccumulateTest) {
- Vector y_a = Vector::Zero(A_->num_rows());
- Vector y_b = Vector::Zero(A_->num_rows());
- for (int i = 0; i < A_->num_cols(); ++i) {
- Vector x = Vector::Zero(A_->num_cols());
+ Vector y_a = Vector::Zero(a_->num_rows());
+ Vector y_b = Vector::Zero(a_->num_rows());
+ for (int i = 0; i < a_->num_cols(); ++i) {
+ Vector x = Vector::Zero(a_->num_cols());
x[i] = 1.0;
- A_->RightMultiplyAndAccumulate(x.data(), y_a.data());
- B_->RightMultiplyAndAccumulate(x.data(), y_b.data());
+ a_->RightMultiplyAndAccumulate(x.data(), y_a.data());
+ b_->RightMultiplyAndAccumulate(x.data(), y_b.data());
EXPECT_LT((y_a - y_b).norm(), 1e-12);
}
}
TEST_F(BlockSparseMatrixTest, RightMultiplyAndAccumulateParallelTest) {
- Vector y_0 = Vector::Random(A_->num_rows());
+ Vector y_0 = Vector::Random(a_->num_rows());
Vector y_s = y_0;
Vector y_p = y_0;
- Vector x = Vector::Random(A_->num_cols());
- A_->RightMultiplyAndAccumulate(x.data(), y_s.data());
+ Vector x = Vector::Random(a_->num_cols());
+ a_->RightMultiplyAndAccumulate(x.data(), y_s.data());
- A_->RightMultiplyAndAccumulate(x.data(), y_p.data(), &context_, kNumThreads);
+ a_->RightMultiplyAndAccumulate(x.data(), y_p.data(), &context_, kNumThreads);
// Current parallel implementation is expected to be bit-exact
EXPECT_EQ((y_s - y_p).norm(), 0.);
}
TEST_F(BlockSparseMatrixTest, LeftMultiplyAndAccumulateTest) {
- Vector y_a = Vector::Zero(A_->num_cols());
- Vector y_b = Vector::Zero(A_->num_cols());
- for (int i = 0; i < A_->num_rows(); ++i) {
- Vector x = Vector::Zero(A_->num_rows());
+ Vector y_a = Vector::Zero(a_->num_cols());
+ Vector y_b = Vector::Zero(a_->num_cols());
+ for (int i = 0; i < a_->num_rows(); ++i) {
+ Vector x = Vector::Zero(a_->num_rows());
x[i] = 1.0;
- A_->LeftMultiplyAndAccumulate(x.data(), y_a.data());
- B_->LeftMultiplyAndAccumulate(x.data(), y_b.data());
+ a_->LeftMultiplyAndAccumulate(x.data(), y_a.data());
+ b_->LeftMultiplyAndAccumulate(x.data(), y_b.data());
EXPECT_LT((y_a - y_b).norm(), 1e-12);
}
}
TEST_F(BlockSparseMatrixTest, LeftMultiplyAndAccumulateParallelTest) {
- Vector y_0 = Vector::Random(A_->num_cols());
+ Vector y_0 = Vector::Random(a_->num_cols());
Vector y_s = y_0;
Vector y_p = y_0;
- Vector x = Vector::Random(A_->num_rows());
- A_->LeftMultiplyAndAccumulate(x.data(), y_s.data());
+ Vector x = Vector::Random(a_->num_rows());
+ a_->LeftMultiplyAndAccumulate(x.data(), y_s.data());
- A_->LeftMultiplyAndAccumulate(x.data(), y_p.data(), &context_, kNumThreads);
+ a_->LeftMultiplyAndAccumulate(x.data(), y_p.data(), &context_, kNumThreads);
// Parallel implementation for left products uses a different order of
// traversal, thus results might be different
@@ -215,49 +215,49 @@
}
TEST_F(BlockSparseMatrixTest, SquaredColumnNormTest) {
- Vector y_a = Vector::Zero(A_->num_cols());
- Vector y_b = Vector::Zero(A_->num_cols());
- A_->SquaredColumnNorm(y_a.data());
- B_->SquaredColumnNorm(y_b.data());
+ Vector y_a = Vector::Zero(a_->num_cols());
+ Vector y_b = Vector::Zero(a_->num_cols());
+ a_->SquaredColumnNorm(y_a.data());
+ b_->SquaredColumnNorm(y_b.data());
EXPECT_LT((y_a - y_b).norm(), 1e-12);
}
TEST_F(BlockSparseMatrixTest, SquaredColumnNormParallelTest) {
- Vector y_a = Vector::Zero(C_->num_cols());
- Vector y_b = Vector::Zero(C_->num_cols());
- C_->SquaredColumnNorm(y_a.data());
+ Vector y_a = Vector::Zero(c_->num_cols());
+ Vector y_b = Vector::Zero(c_->num_cols());
+ c_->SquaredColumnNorm(y_a.data());
- C_->SquaredColumnNorm(y_b.data(), &context_, kNumThreads);
+ c_->SquaredColumnNorm(y_b.data(), &context_, kNumThreads);
EXPECT_LT((y_a - y_b).norm(), 1e-12);
}
TEST_F(BlockSparseMatrixTest, ScaleColumnsTest) {
- const Vector scale = Vector::Random(C_->num_cols()).cwiseAbs();
+ const Vector scale = Vector::Random(c_->num_cols()).cwiseAbs();
- const Vector x = Vector::Random(C_->num_rows());
- Vector y_expected = Vector::Zero(C_->num_cols());
- C_->LeftMultiplyAndAccumulate(x.data(), y_expected.data());
+ const Vector x = Vector::Random(c_->num_rows());
+ Vector y_expected = Vector::Zero(c_->num_cols());
+ c_->LeftMultiplyAndAccumulate(x.data(), y_expected.data());
y_expected.array() *= scale.array();
- C_->ScaleColumns(scale.data());
- Vector y_observed = Vector::Zero(C_->num_cols());
- C_->LeftMultiplyAndAccumulate(x.data(), y_observed.data());
+ c_->ScaleColumns(scale.data());
+ Vector y_observed = Vector::Zero(c_->num_cols());
+ c_->LeftMultiplyAndAccumulate(x.data(), y_observed.data());
EXPECT_GT(y_expected.norm(), 1.);
EXPECT_LT((y_observed - y_expected).norm(), 1e-12 * y_expected.norm());
}
TEST_F(BlockSparseMatrixTest, ScaleColumnsParallelTest) {
- const Vector scale = Vector::Random(C_->num_cols()).cwiseAbs();
+ const Vector scale = Vector::Random(c_->num_cols()).cwiseAbs();
- const Vector x = Vector::Random(C_->num_rows());
- Vector y_expected = Vector::Zero(C_->num_cols());
- C_->LeftMultiplyAndAccumulate(x.data(), y_expected.data());
+ const Vector x = Vector::Random(c_->num_rows());
+ Vector y_expected = Vector::Zero(c_->num_cols());
+ c_->LeftMultiplyAndAccumulate(x.data(), y_expected.data());
y_expected.array() *= scale.array();
- C_->ScaleColumns(scale.data(), &context_, kNumThreads);
- Vector y_observed = Vector::Zero(C_->num_cols());
- C_->LeftMultiplyAndAccumulate(x.data(), y_observed.data());
+ c_->ScaleColumns(scale.data(), &context_, kNumThreads);
+ Vector y_observed = Vector::Zero(c_->num_cols());
+ c_->LeftMultiplyAndAccumulate(x.data(), y_observed.data());
EXPECT_GT(y_expected.norm(), 1.);
EXPECT_LT((y_observed - y_expected).norm(), 1e-12 * y_expected.norm());
@@ -266,8 +266,8 @@
TEST_F(BlockSparseMatrixTest, ToDenseMatrixTest) {
Matrix m_a;
Matrix m_b;
- A_->ToDenseMatrix(&m_a);
- B_->ToDenseMatrix(&m_b);
+ a_->ToDenseMatrix(&m_a);
+ b_->ToDenseMatrix(&m_b);
EXPECT_LT((m_a - m_b).norm(), 1e-12);
}
@@ -276,25 +276,25 @@
CreateLinearLeastSquaresProblemFromId(2);
std::unique_ptr<BlockSparseMatrix> m(
down_cast<BlockSparseMatrix*>(problem->A.release()));
- A_->AppendRows(*m);
- EXPECT_EQ(A_->num_rows(), 2 * m->num_rows());
- EXPECT_EQ(A_->num_cols(), m->num_cols());
+ a_->AppendRows(*m);
+ EXPECT_EQ(a_->num_rows(), 2 * m->num_rows());
+ EXPECT_EQ(a_->num_cols(), m->num_cols());
problem = CreateLinearLeastSquaresProblemFromId(1);
std::unique_ptr<TripletSparseMatrix> m2(
down_cast<TripletSparseMatrix*>(problem->A.release()));
- B_->AppendRows(*m2);
+ b_->AppendRows(*m2);
- Vector y_a = Vector::Zero(A_->num_rows());
- Vector y_b = Vector::Zero(A_->num_rows());
- for (int i = 0; i < A_->num_cols(); ++i) {
- Vector x = Vector::Zero(A_->num_cols());
+ Vector y_a = Vector::Zero(a_->num_rows());
+ Vector y_b = Vector::Zero(a_->num_rows());
+ for (int i = 0; i < a_->num_cols(); ++i) {
+ Vector x = Vector::Zero(a_->num_cols());
x[i] = 1.0;
y_a.setZero();
y_b.setZero();
- A_->RightMultiplyAndAccumulate(x.data(), y_a.data());
- B_->RightMultiplyAndAccumulate(x.data(), y_b.data());
+ a_->RightMultiplyAndAccumulate(x.data(), y_a.data());
+ b_->RightMultiplyAndAccumulate(x.data(), y_b.data());
EXPECT_LT((y_a - y_b).norm(), 1e-12);
}
}
@@ -304,7 +304,7 @@
std::unique_ptr<BlockSparseMatrix> m(
down_cast<BlockSparseMatrix*>(problem->A.release()));
- auto block_structure = A_->block_structure();
+ auto block_structure = a_->block_structure();
// Several AppendRows and DeleteRowBlocks operations are applied to matrix,
// with regular and transpose block structures being compared after each
@@ -315,14 +315,14 @@
const int num_row_blocks_to_delete[] = {0, -1, 1, -1, 8, -1, 10};
for (auto& t : num_row_blocks_to_delete) {
if (t == -1) {
- A_->AppendRows(*m);
+ a_->AppendRows(*m);
} else if (t > 0) {
CHECK_GE(block_structure->rows.size(), t);
- A_->DeleteRowBlocks(t);
+ a_->DeleteRowBlocks(t);
}
- auto block_structure = A_->block_structure();
- auto transpose_block_structure = A_->transpose_block_structure();
+ auto block_structure = a_->block_structure();
+ auto transpose_block_structure = a_->transpose_block_structure();
ASSERT_NE(block_structure, nullptr);
ASSERT_NE(transpose_block_structure, nullptr);
@@ -378,7 +378,7 @@
}
TEST_F(BlockSparseMatrixTest, AppendAndDeleteBlockDiagonalMatrix) {
- const std::vector<Block>& column_blocks = A_->block_structure()->cols;
+ const std::vector<Block>& column_blocks = a_->block_structure()->cols;
const int num_cols =
column_blocks.back().size + column_blocks.back().position;
Vector diagonal(num_cols);
@@ -388,39 +388,39 @@
std::unique_ptr<BlockSparseMatrix> appendage(
BlockSparseMatrix::CreateDiagonalMatrix(diagonal.data(), column_blocks));
- A_->AppendRows(*appendage);
+ a_->AppendRows(*appendage);
Vector y_a, y_b;
- y_a.resize(A_->num_rows());
- y_b.resize(A_->num_rows());
- for (int i = 0; i < A_->num_cols(); ++i) {
- Vector x = Vector::Zero(A_->num_cols());
+ y_a.resize(a_->num_rows());
+ y_b.resize(a_->num_rows());
+ for (int i = 0; i < a_->num_cols(); ++i) {
+ Vector x = Vector::Zero(a_->num_cols());
x[i] = 1.0;
y_a.setZero();
y_b.setZero();
- A_->RightMultiplyAndAccumulate(x.data(), y_a.data());
- B_->RightMultiplyAndAccumulate(x.data(), y_b.data());
- EXPECT_LT((y_a.head(B_->num_rows()) - y_b.head(B_->num_rows())).norm(),
+ a_->RightMultiplyAndAccumulate(x.data(), y_a.data());
+ b_->RightMultiplyAndAccumulate(x.data(), y_b.data());
+ EXPECT_LT((y_a.head(b_->num_rows()) - y_b.head(b_->num_rows())).norm(),
1e-12);
- Vector expected_tail = Vector::Zero(A_->num_cols());
+ Vector expected_tail = Vector::Zero(a_->num_cols());
expected_tail(i) = diagonal(i);
- EXPECT_LT((y_a.tail(A_->num_cols()) - expected_tail).norm(), 1e-12);
+ EXPECT_LT((y_a.tail(a_->num_cols()) - expected_tail).norm(), 1e-12);
}
- A_->DeleteRowBlocks(column_blocks.size());
- EXPECT_EQ(A_->num_rows(), B_->num_rows());
- EXPECT_EQ(A_->num_cols(), B_->num_cols());
+ a_->DeleteRowBlocks(column_blocks.size());
+ EXPECT_EQ(a_->num_rows(), b_->num_rows());
+ EXPECT_EQ(a_->num_cols(), b_->num_cols());
- y_a.resize(A_->num_rows());
- y_b.resize(A_->num_rows());
- for (int i = 0; i < A_->num_cols(); ++i) {
- Vector x = Vector::Zero(A_->num_cols());
+ y_a.resize(a_->num_rows());
+ y_b.resize(a_->num_rows());
+ for (int i = 0; i < a_->num_cols(); ++i) {
+ Vector x = Vector::Zero(a_->num_cols());
x[i] = 1.0;
y_a.setZero();
y_b.setZero();
- A_->RightMultiplyAndAccumulate(x.data(), y_a.data());
- B_->RightMultiplyAndAccumulate(x.data(), y_b.data());
+ a_->RightMultiplyAndAccumulate(x.data(), y_a.data());
+ b_->RightMultiplyAndAccumulate(x.data(), y_b.data());
EXPECT_LT((y_a - y_b).norm(), 1e-12);
}
}
diff --git a/internal/ceres/cgnr_solver.cc b/internal/ceres/cgnr_solver.cc
index a1a3c6e..3f867dc 100644
--- a/internal/ceres/cgnr_solver.cc
+++ b/internal/ceres/cgnr_solver.cc
@@ -160,8 +160,6 @@
preconditioner_options.context = options_.context;
if (options_.preconditioner_type == JACOBI) {
- // TODO: BlockSparseJacobiPreconditioner::RightMultiply will benefit from
- // multithreading
preconditioner_ = std::make_unique<BlockSparseJacobiPreconditioner>(
preconditioner_options, *A);
} else if (options_.preconditioner_type == SUBSET) {
diff --git a/internal/ceres/iterative_schur_complement_solver.cc b/internal/ceres/iterative_schur_complement_solver.cc
index 16b20a7..ace075e 100644
--- a/internal/ceres/iterative_schur_complement_solver.cc
+++ b/internal/ceres/iterative_schur_complement_solver.cc
@@ -186,17 +186,10 @@
case SCHUR_POWER_SERIES_EXPANSION:
// Ignoring the value of spse_tolerance to ensure preconditioner stays
// fixed during the iterations of cg.
- // TODO: In PowerSeriesExpansionPreconditioner::RightMultiplyAndAccumulate
- // only operations performed via ImplicitSchurComplement are threaded.
- // PowerSeriesExpansionPreconditioner will benefit from multithreading
- // applied to remaning operations (block-sparse right product and several
- // vector operations)
preconditioner_ = std::make_unique<PowerSeriesExpansionPreconditioner>(
schur_complement_.get(), options_.max_num_spse_iterations, 0);
break;
case SCHUR_JACOBI:
- // TODO: SchurJacobiPreconditioner::RightMultiply will benefit from
- // multithreading
preconditioner_ = std::make_unique<SchurJacobiPreconditioner>(
*A->block_structure(), preconditioner_options);
break;
diff --git a/internal/ceres/parallel_for_cxx.cc b/internal/ceres/parallel_for_cxx.cc
index 3f47436..44b0be5 100644
--- a/internal/ceres/parallel_for_cxx.cc
+++ b/internal/ceres/parallel_for_cxx.cc
@@ -28,11 +28,13 @@
//
// Author: vitus@google.com (Michael Vitus)
+#include <algorithm>
#include <atomic>
#include <cmath>
#include <condition_variable>
#include <memory>
#include <mutex>
+#include <tuple>
#include "ceres/internal/config.h"
#include "ceres/parallel_for.h"
diff --git a/internal/ceres/parallel_for_test.cc b/internal/ceres/parallel_for_test.cc
index 058b0a3..6c342aa 100644
--- a/internal/ceres/parallel_for_test.cc
+++ b/internal/ceres/parallel_for_test.cc
@@ -36,6 +36,7 @@
#include <numeric>
#include <random>
#include <thread>
+#include <tuple>
#include <vector>
#include "ceres/context_impl.h"
diff --git a/internal/ceres/parallel_vector_operations_benchmark.cc b/internal/ceres/parallel_vector_operations_benchmark.cc
index 7180b54..31a5c7a 100644
--- a/internal/ceres/parallel_vector_operations_benchmark.cc
+++ b/internal/ceres/parallel_vector_operations_benchmark.cc
@@ -25,6 +25,9 @@
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
+
+#include <algorithm>
+
#include "benchmark/benchmark.h"
#include "ceres/eigen_vector_ops.h"
#include "ceres/parallel_for.h"
diff --git a/internal/ceres/power_series_expansion_preconditioner.h b/internal/ceres/power_series_expansion_preconditioner.h
index a8bb9a6..20be767 100644
--- a/internal/ceres/power_series_expansion_preconditioner.h
+++ b/internal/ceres/power_series_expansion_preconditioner.h
@@ -41,7 +41,14 @@
// This is a preconditioner via power series expansion of Schur
// complement inverse based on "Weber et al, Power Bundle Adjustment for
// Large-Scale 3D Reconstruction".
-
+//
+//
+// TODO(https://github.com/ceres-solver/ceres-solver/issues/934): In
+// PowerSeriesExpansionPreconditioner::RightMultiplyAndAccumulate only
+// operations performed via ImplicitSchurComplement are threaded.
+// PowerSeriesExpansionPreconditioner will benefit from multithreading
+// applied to remaining operations (block-sparse right product and several
+// vector operations).
class CERES_NO_EXPORT PowerSeriesExpansionPreconditioner
: public Preconditioner {
public:
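For reference, the series this class truncates follows from the standard Schur-complement identity (a sketch, not taken from the patch): with the reduced system S = B - E C^{-1} E^T and invertible B,

    S^{-1} = (I - B^{-1} E C^{-1} E^T)^{-1} B^{-1}
           = sum_{k >= 0} (B^{-1} E C^{-1} E^T)^k B^{-1},

a Neumann series that converges when the spectral radius of B^{-1} E C^{-1} E^T is below one. The preconditioner applies a truncation of this sum; the number of terms is capped by max_num_spse_iterations, as passed in iterative_schur_complement_solver.cc above.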
diff --git a/internal/ceres/problem.cc b/internal/ceres/problem.cc
index e51fdd1..e8589be 100644
--- a/internal/ceres/problem.cc
+++ b/internal/ceres/problem.cc
@@ -212,7 +212,7 @@
impl_->GetResidualBlocksForParameterBlock(values, residual_blocks);
}
-const Problem::Options & Problem::options() const { return impl_->options(); }
+const Problem::Options& Problem::options() const { return impl_->options(); }
internal::ProblemImpl* Problem::mutable_impl() { return impl_.get(); }
diff --git a/internal/ceres/problem_impl.h b/internal/ceres/problem_impl.h
index 22b37b6..b51c48d 100644
--- a/internal/ceres/problem_impl.h
+++ b/internal/ceres/problem_impl.h
@@ -166,7 +166,7 @@
return residual_block_set_;
}
- const Problem::Options & options() const { return options_; }
+ const Problem::Options& options() const { return options_; }
ContextImpl* context() { return context_impl_; }
diff --git a/internal/ceres/schur_jacobi_preconditioner.h b/internal/ceres/schur_jacobi_preconditioner.h
index ddf471c..fbdeab4 100644
--- a/internal/ceres/schur_jacobi_preconditioner.h
+++ b/internal/ceres/schur_jacobi_preconditioner.h
@@ -73,6 +73,8 @@
// preconditioner.Update(A, nullptr);
// preconditioner.RightMultiplyAndAccumulate(x, y);
//
+// TODO(https://github.com/ceres-solver/ceres-solver/issues/935):
+// SchurJacobiPreconditioner::RightMultiply will benefit from multithreading
class CERES_NO_EXPORT SchurJacobiPreconditioner
: public BlockSparseMatrixPreconditioner {
public:
diff --git a/internal/ceres/sparse_matrix.cc b/internal/ceres/sparse_matrix.cc
index 584ac1d..77c28e7 100644
--- a/internal/ceres/sparse_matrix.cc
+++ b/internal/ceres/sparse_matrix.cc
@@ -42,12 +42,12 @@
SquaredColumnNorm(x);
}
-void SparseMatrix::ScaleColumns(const double* x,
+void SparseMatrix::ScaleColumns(const double* scale,
ContextImpl* context,
int num_threads) {
(void)context;
(void)num_threads;
- ScaleColumns(x);
+ ScaleColumns(scale);
}
} // namespace ceres::internal