Modernize more
Apply clang-tidy google-* and modernize-* fixes (excluding
modernize-use-trailing-return-type) using:
$ clang-tidy -p <build-dir> \
-checks='-*,google-*,modernize-*,-modernize-use-trailing-return-type' {} -fix
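For reference, a minimal standalone sketch of the kinds of rewrites these
checks apply follows. It is illustrative only and not taken from the Ceres
sources (the Block/Solver types below are invented for the example): typedef
to using aliases, explicit single-argument constructors, default member
initializers, pass-by-value plus std::move for sink arguments, range-based
for loops, auto for new expressions, and emplace_back instead of
push_back(make_pair(...)).

// Illustrative only: mirrors the patterns changed in this commit, not Ceres code.
#include <cstdio>
#include <string>
#include <unordered_map>
#include <utility>
#include <vector>

// modernize-use-using: prefer alias declarations over typedef.
using LayoutType = std::unordered_map<long, int>;

struct Block {
  // modernize-use-equals-default + modernize-use-default-member-init.
  Block() = default;
  // google-explicit-constructor: mark single-argument constructors explicit.
  explicit Block(int size) : size(size) {}
  int size{-1};
  int position{-1};
};

class Solver {
 public:
  // modernize-pass-by-value: take by value and move into the member.
  explicit Solver(std::string name) : name_(std::move(name)) {}
  const std::string& name() const { return name_; }

 private:
  std::string name_;
};

int main() {
  std::vector<Block> blocks;
  // modernize-use-emplace: construct in place instead of push_back(Block(...)).
  blocks.emplace_back(3);
  blocks.emplace_back(4);

  // modernize-loop-convert: range-based for instead of an index loop.
  int num_cols = 0;
  for (const auto& block : blocks) {
    num_cols += block.size;
  }

  // modernize-use-auto: the element type is already spelled out by the new expression.
  auto* scratch = new double[num_cols];
  for (int i = 0; i < num_cols; ++i) {
    scratch[i] = 0.0;
  }
  delete[] scratch;

  Solver solver("cgnr");
  std::printf("%s: %d columns\n", solver.name().c_str(), num_cols);
  return 0;
}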
Change-Id: I7450cc58ea9abf928f73a467e87876083217fa26
diff --git a/internal/ceres/autodiff_benchmarks/relative_pose_error.h b/internal/ceres/autodiff_benchmarks/relative_pose_error.h
index b5c1a93..a54a92f 100644
--- a/internal/ceres/autodiff_benchmarks/relative_pose_error.h
+++ b/internal/ceres/autodiff_benchmarks/relative_pose_error.h
@@ -33,6 +33,7 @@
#define CERES_INTERNAL_AUTODIFF_BENCHMARK_RELATIVE_POSE_ERROR_H_
#include <Eigen/Dense>
+#include <utility>
#include "ceres/rotation.h"
@@ -43,9 +44,8 @@
// poses T_w_i and T_w_j. For the residual we use the log of the residual
// pose, in split representation SO(3) x R^3.
struct RelativePoseError {
- RelativePoseError(const Eigen::Quaterniond& q_i_j,
- const Eigen::Vector3d& t_i_j)
- : meas_q_i_j_(q_i_j), meas_t_i_j_(t_i_j) {}
+ RelativePoseError(Eigen::Quaterniond q_i_j, Eigen::Vector3d t_i_j)
+ : meas_q_i_j_(std::move(q_i_j)), meas_t_i_j_(std::move(t_i_j)) {}
template <typename T>
inline bool operator()(const T* const pose_i_ptr,
diff --git a/internal/ceres/autodiff_cost_function_test.cc b/internal/ceres/autodiff_cost_function_test.cc
index cc340f6..a59d524 100644
--- a/internal/ceres/autodiff_cost_function_test.cc
+++ b/internal/ceres/autodiff_cost_function_test.cc
@@ -57,7 +57,7 @@
new AutoDiffCostFunction<BinaryScalarCost, 1, 2, 2>(
new BinaryScalarCost(1.0));
- double** parameters = new double*[2];
+ auto** parameters = new double*[2];
parameters[0] = new double[2];
parameters[1] = new double[2];
@@ -67,7 +67,7 @@
parameters[1][0] = 3;
parameters[1][1] = 4;
- double** jacobians = new double*[2];
+ auto** jacobians = new double*[2];
jacobians[0] = new double[2];
jacobians[1] = new double[2];
@@ -126,8 +126,8 @@
1,
1>(new TenParameterCost);
- double** parameters = new double*[10];
- double** jacobians = new double*[10];
+ auto** parameters = new double*[10];
+ auto** jacobians = new double*[10];
for (int i = 0; i < 10; ++i) {
parameters[i] = new double[1];
parameters[i][0] = i;
diff --git a/internal/ceres/autodiff_local_parameterization_test.cc b/internal/ceres/autodiff_local_parameterization_test.cc
index 36fd3c9..9dc2551 100644
--- a/internal/ceres/autodiff_local_parameterization_test.cc
+++ b/internal/ceres/autodiff_local_parameterization_test.cc
@@ -194,13 +194,13 @@
x[2] * x[2] +
x[3] * x[3]);
// clang-format on
- for (int i = 0; i < 4; ++i) {
- x[i] = x[i] / norm_x;
+ for (double& x_i : x) {
+ x_i = x_i / norm_x;
}
double delta[3] = {0.24, 0.15, 0.10};
- for (int i = 0; i < 3; ++i) {
- delta[i] = delta[i] * 1e-14;
+ for (double& delta_i : delta) {
+ delta_i = delta_i * 1e-14;
}
QuaternionParameterizationTestHelper(x, delta);
@@ -215,8 +215,8 @@
x[3] * x[3]);
// clang-format on
- for (int i = 0; i < 4; ++i) {
- x[i] = x[i] / norm_x;
+ for (double& x_i : x) {
+ x_i = x_i / norm_x;
}
double delta[3] = {0.24, 0.15, 0.10};
diff --git a/internal/ceres/autodiff_test.cc b/internal/ceres/autodiff_test.cc
index 2d56400..69ce584 100644
--- a/internal/ceres/autodiff_test.cc
+++ b/internal/ceres/autodiff_test.cc
@@ -1,5 +1,5 @@
// Ceres Solver - A fast non-linear least squares minimizer
-// Copyright 2015 Google Inc. All rights reserved.
+// Copyright 2022 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
@@ -172,8 +172,8 @@
// Make random P and X, in a single vector.
double PX[12 + 4];
- for (int i = 0; i < 12 + 4; ++i) {
- PX[i] = RandDouble();
+ for (double& PX_i : PX) {
+ PX_i = RandDouble();
}
// Handy names for the P and X parts.
@@ -292,7 +292,7 @@
// Make random parameter vector.
double qcX[4 + 3 + 3];
- for (int i = 0; i < 4 + 3 + 3; ++i) qcX[i] = RandDouble();
+ for (double& qcX_i : qcX) qcX_i = RandDouble();
// Handy names.
double* q = qcX;
@@ -658,7 +658,7 @@
// this function.
y += 1;
- typedef Jet<double, 2> JetT;
+ using JetT = Jet<double, 2>;
FixedArray<JetT, (256 * 7) / sizeof(JetT)> x(3);
// Need this to makes sure that x does not get optimized out.
diff --git a/internal/ceres/block_jacobi_preconditioner.cc b/internal/ceres/block_jacobi_preconditioner.cc
index da8fc94..6e979de 100644
--- a/internal/ceres/block_jacobi_preconditioner.cc
+++ b/internal/ceres/block_jacobi_preconditioner.cc
@@ -1,5 +1,5 @@
// Ceres Solver - A fast non-linear least squares minimizer
-// Copyright 2015 Google Inc. All rights reserved.
+// Copyright 2022 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
@@ -60,16 +60,15 @@
for (int i = 0; i < bs->rows.size(); ++i) {
const int row_block_size = bs->rows[i].block.size;
const std::vector<Cell>& cells = bs->rows[i].cells;
- for (int j = 0; j < cells.size(); ++j) {
- const int block_id = cells[j].block_id;
+ for (const auto& cell : cells) {
+ const int block_id = cell.block_id;
const int col_block_size = bs->cols[block_id].size;
int r, c, row_stride, col_stride;
CellInfo* cell_info =
m_->GetCell(block_id, block_id, &r, &c, &row_stride, &col_stride);
MatrixRef m(cell_info->values, row_stride, col_stride);
- ConstMatrixRef b(
- values + cells[j].position, row_block_size, col_block_size);
+ ConstMatrixRef b(values + cell.position, row_block_size, col_block_size);
m.block(r, c, col_block_size, col_block_size) += b.transpose() * b;
}
}
diff --git a/internal/ceres/block_jacobi_preconditioner_test.cc b/internal/ceres/block_jacobi_preconditioner_test.cc
index 6d124f1..06b69a2 100644
--- a/internal/ceres/block_jacobi_preconditioner_test.cc
+++ b/internal/ceres/block_jacobi_preconditioner_test.cc
@@ -67,8 +67,7 @@
BlockJacobiPreconditioner pre(*A);
pre.Update(*A, D.get());
- BlockRandomAccessDiagonalMatrix* m =
- const_cast<BlockRandomAccessDiagonalMatrix*>(&pre.matrix());
+ auto* m = const_cast<BlockRandomAccessDiagonalMatrix*>(&pre.matrix());
EXPECT_EQ(m->num_rows(), A->num_cols());
EXPECT_EQ(m->num_cols(), A->num_cols());
diff --git a/internal/ceres/block_jacobian_writer.cc b/internal/ceres/block_jacobian_writer.cc
index e0f6ec0..a70660f 100644
--- a/internal/ceres/block_jacobian_writer.cc
+++ b/internal/ceres/block_jacobian_writer.cc
@@ -1,5 +1,5 @@
// Ceres Solver - A fast non-linear least squares minimizer
-// Copyright 2015 Google Inc. All rights reserved.
+// Copyright 2022 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
@@ -69,8 +69,7 @@
// matrix. Also compute the number of jacobian blocks.
int f_block_pos = 0;
int num_jacobian_blocks = 0;
- for (int i = 0; i < residual_blocks.size(); ++i) {
- ResidualBlock* residual_block = residual_blocks[i];
+ for (auto* residual_block : residual_blocks) {
const int num_residuals = residual_block->NumResiduals();
const int num_parameter_blocks = residual_block->NumParameterBlocks();
@@ -152,7 +151,7 @@
}
std::unique_ptr<SparseMatrix> BlockJacobianWriter::CreateJacobian() const {
- CompressedRowBlockStructure* bs = new CompressedRowBlockStructure;
+ auto* bs = new CompressedRowBlockStructure;
const vector<ParameterBlock*>& parameter_blocks =
program_->parameter_blocks();
diff --git a/internal/ceres/block_random_access_diagonal_matrix.cc b/internal/ceres/block_random_access_diagonal_matrix.cc
index af372ad..f55f3b3 100644
--- a/internal/ceres/block_random_access_diagonal_matrix.cc
+++ b/internal/ceres/block_random_access_diagonal_matrix.cc
@@ -1,5 +1,5 @@
// Ceres Solver - A fast non-linear least squares minimizer
-// Copyright 2015 Google Inc. All rights reserved.
+// Copyright 2022 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
@@ -58,10 +58,10 @@
int num_cols = 0;
int num_nonzeros = 0;
vector<int> block_positions;
- for (int i = 0; i < blocks_.size(); ++i) {
+ for (int block_size : blocks_) {
block_positions.push_back(num_cols);
- num_cols += blocks_[i];
- num_nonzeros += blocks_[i] * blocks_[i];
+ num_cols += block_size;
+ num_nonzeros += block_size * block_size;
}
VLOG(1) << "Matrix Size [" << num_cols << "," << num_cols << "] "
@@ -123,8 +123,7 @@
void BlockRandomAccessDiagonalMatrix::Invert() {
double* values = tsm_->mutable_values();
- for (int i = 0; i < blocks_.size(); ++i) {
- const int block_size = blocks_[i];
+ for (int block_size : blocks_) {
MatrixRef block(values, block_size, block_size);
block = block.selfadjointView<Eigen::Upper>().llt().solve(
Matrix::Identity(block_size, block_size));
@@ -137,8 +136,7 @@
CHECK(x != nullptr);
CHECK(y != nullptr);
const double* values = tsm_->values();
- for (int i = 0; i < blocks_.size(); ++i) {
- const int block_size = blocks_[i];
+ for (int block_size : blocks_) {
ConstMatrixRef block(values, block_size, block_size);
VectorRef(y, block_size).noalias() += block * ConstVectorRef(x, block_size);
x += block_size;
diff --git a/internal/ceres/block_random_access_matrix.h b/internal/ceres/block_random_access_matrix.h
index ec2c9e1..48759b7 100644
--- a/internal/ceres/block_random_access_matrix.h
+++ b/internal/ceres/block_random_access_matrix.h
@@ -86,10 +86,10 @@
// Structure to carry a pointer to the array containing a cell and the
// mutex guarding it.
struct CERES_NO_EXPORT CellInfo {
- CellInfo() : values(nullptr) {}
+ CellInfo() = default;
explicit CellInfo(double* values) : values(values) {}
- double* values;
+ double* values{nullptr};
std::mutex m;
};
diff --git a/internal/ceres/block_random_access_sparse_matrix.cc b/internal/ceres/block_random_access_sparse_matrix.cc
index 0bedf3c..a026daa 100644
--- a/internal/ceres/block_random_access_sparse_matrix.cc
+++ b/internal/ceres/block_random_access_sparse_matrix.cc
@@ -1,5 +1,5 @@
// Ceres Solver - A fast non-linear least squares minimizer
-// Copyright 2015 Google Inc. All rights reserved.
+// Copyright 2022 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
@@ -58,9 +58,9 @@
// rows/columns.
int num_cols = 0;
block_positions_.reserve(blocks_.size());
- for (int i = 0; i < blocks_.size(); ++i) {
+ for (int block_size : blocks_) {
block_positions_.push_back(num_cols);
- num_cols += blocks_[i];
+ num_cols += block_size;
}
// Count the number of scalar non-zero entries and build the layout
@@ -87,7 +87,7 @@
for (const auto& block_pair : block_pairs) {
const int row_block_size = blocks_[block_pair.first];
const int col_block_size = blocks_[block_pair.second];
- cell_values_.push_back(make_pair(block_pair, values + pos));
+ cell_values_.emplace_back(block_pair, values + pos);
layout_[IntPairToLong(block_pair.first, block_pair.second)] =
new CellInfo(values + pos);
pos += row_block_size * col_block_size;
diff --git a/internal/ceres/block_random_access_sparse_matrix.h b/internal/ceres/block_random_access_sparse_matrix.h
index 43886bd..b31a2ad 100644
--- a/internal/ceres/block_random_access_sparse_matrix.h
+++ b/internal/ceres/block_random_access_sparse_matrix.h
@@ -112,7 +112,7 @@
// A mapping from <row_block_id, col_block_id> to the position in
// the values array of tsm_ where the block is stored.
- typedef std::unordered_map<long int, CellInfo*> LayoutType;
+ using LayoutType = std::unordered_map<long, CellInfo*>;
LayoutType layout_;
// In order traversal of contents of the matrix. This allows us to
diff --git a/internal/ceres/block_sparse_matrix.cc b/internal/ceres/block_sparse_matrix.cc
index a0f55f8..31ea39d 100644
--- a/internal/ceres/block_sparse_matrix.cc
+++ b/internal/ceres/block_sparse_matrix.cc
@@ -1,5 +1,5 @@
// Ceres Solver - A fast non-linear least squares minimizer
-// Copyright 2015 Google Inc. All rights reserved.
+// Copyright 2022 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
@@ -56,8 +56,8 @@
CHECK(block_structure_ != nullptr);
// Count the number of columns in the matrix.
- for (int i = 0; i < block_structure_->cols.size(); ++i) {
- num_cols_ += block_structure_->cols[i].size;
+ for (auto& col : block_structure_->cols) {
+ num_cols_ += col.size;
}
// Count the number of non-zero entries and the number of rows in
@@ -67,8 +67,8 @@
num_rows_ += row_block_size;
const vector<Cell>& cells = block_structure_->rows[i].cells;
- for (int j = 0; j < cells.size(); ++j) {
- int col_block_id = cells[j].block_id;
+ for (const auto& cell : cells) {
+ int col_block_id = cell.block_id;
int col_block_size = block_structure_->cols[col_block_id].size;
num_nonzeros_ += col_block_size * row_block_size;
}
@@ -96,12 +96,12 @@
int row_block_pos = block_structure_->rows[i].block.position;
int row_block_size = block_structure_->rows[i].block.size;
const vector<Cell>& cells = block_structure_->rows[i].cells;
- for (int j = 0; j < cells.size(); ++j) {
- int col_block_id = cells[j].block_id;
+ for (const auto& cell : cells) {
+ int col_block_id = cell.block_id;
int col_block_size = block_structure_->cols[col_block_id].size;
int col_block_pos = block_structure_->cols[col_block_id].position;
MatrixVectorMultiply<Eigen::Dynamic, Eigen::Dynamic, 1>(
- values_.get() + cells[j].position,
+ values_.get() + cell.position,
row_block_size,
col_block_size,
x + col_block_pos,
@@ -118,12 +118,12 @@
int row_block_pos = block_structure_->rows[i].block.position;
int row_block_size = block_structure_->rows[i].block.size;
const vector<Cell>& cells = block_structure_->rows[i].cells;
- for (int j = 0; j < cells.size(); ++j) {
- int col_block_id = cells[j].block_id;
+ for (const auto& cell : cells) {
+ int col_block_id = cell.block_id;
int col_block_size = block_structure_->cols[col_block_id].size;
int col_block_pos = block_structure_->cols[col_block_id].position;
MatrixTransposeVectorMultiply<Eigen::Dynamic, Eigen::Dynamic, 1>(
- values_.get() + cells[j].position,
+ values_.get() + cell.position,
row_block_size,
col_block_size,
x + row_block_pos,
@@ -138,12 +138,12 @@
for (int i = 0; i < block_structure_->rows.size(); ++i) {
int row_block_size = block_structure_->rows[i].block.size;
const vector<Cell>& cells = block_structure_->rows[i].cells;
- for (int j = 0; j < cells.size(); ++j) {
- int col_block_id = cells[j].block_id;
+ for (const auto& cell : cells) {
+ int col_block_id = cell.block_id;
int col_block_size = block_structure_->cols[col_block_id].size;
int col_block_pos = block_structure_->cols[col_block_id].position;
const MatrixRef m(
- values_.get() + cells[j].position, row_block_size, col_block_size);
+ values_.get() + cell.position, row_block_size, col_block_size);
VectorRef(x + col_block_pos, col_block_size) += m.colwise().squaredNorm();
}
}
@@ -155,12 +155,12 @@
for (int i = 0; i < block_structure_->rows.size(); ++i) {
int row_block_size = block_structure_->rows[i].block.size;
const vector<Cell>& cells = block_structure_->rows[i].cells;
- for (int j = 0; j < cells.size(); ++j) {
- int col_block_id = cells[j].block_id;
+ for (const auto& cell : cells) {
+ int col_block_id = cell.block_id;
int col_block_size = block_structure_->cols[col_block_id].size;
int col_block_pos = block_structure_->cols[col_block_id].position;
MatrixRef m(
- values_.get() + cells[j].position, row_block_size, col_block_size);
+ values_.get() + cell.position, row_block_size, col_block_size);
m *= ConstVectorRef(scale + col_block_pos, col_block_size).asDiagonal();
}
}
@@ -177,11 +177,11 @@
int row_block_pos = block_structure_->rows[i].block.position;
int row_block_size = block_structure_->rows[i].block.size;
const vector<Cell>& cells = block_structure_->rows[i].cells;
- for (int j = 0; j < cells.size(); ++j) {
- int col_block_id = cells[j].block_id;
+ for (const auto& cell : cells) {
+ int col_block_id = cell.block_id;
int col_block_size = block_structure_->cols[col_block_id].size;
int col_block_pos = block_structure_->cols[col_block_id].position;
- int jac_pos = cells[j].position;
+ int jac_pos = cell.position;
m.block(row_block_pos, col_block_pos, row_block_size, col_block_size) +=
MatrixRef(values_.get() + jac_pos, row_block_size, col_block_size);
}
@@ -200,11 +200,11 @@
int row_block_pos = block_structure_->rows[i].block.position;
int row_block_size = block_structure_->rows[i].block.size;
const vector<Cell>& cells = block_structure_->rows[i].cells;
- for (int j = 0; j < cells.size(); ++j) {
- int col_block_id = cells[j].block_id;
+ for (const auto& cell : cells) {
+ int col_block_id = cell.block_id;
int col_block_size = block_structure_->cols[col_block_id].size;
int col_block_pos = block_structure_->cols[col_block_id].position;
- int jac_pos = cells[j].position;
+ int jac_pos = cell.position;
for (int r = 0; r < row_block_size; ++r) {
for (int c = 0; c < col_block_size; ++c, ++jac_pos) {
matrix->mutable_rows()[jac_pos] = row_block_pos + r;
@@ -229,11 +229,11 @@
const int row_block_pos = block_structure_->rows[i].block.position;
const int row_block_size = block_structure_->rows[i].block.size;
const vector<Cell>& cells = block_structure_->rows[i].cells;
- for (int j = 0; j < cells.size(); ++j) {
- const int col_block_id = cells[j].block_id;
+ for (const auto& cell : cells) {
+ const int col_block_id = cell.block_id;
const int col_block_size = block_structure_->cols[col_block_id].size;
const int col_block_pos = block_structure_->cols[col_block_id].position;
- int jac_pos = cells[j].position;
+ int jac_pos = cell.position;
for (int r = 0; r < row_block_size; ++r) {
for (int c = 0; c < col_block_size; ++c) {
fprintf(file,
@@ -250,7 +250,7 @@
std::unique_ptr<BlockSparseMatrix> BlockSparseMatrix::CreateDiagonalMatrix(
const double* diagonal, const std::vector<Block>& column_blocks) {
// Create the block structure for the diagonal matrix.
- CompressedRowBlockStructure* bs = new CompressedRowBlockStructure();
+ auto* bs = new CompressedRowBlockStructure();
bs->cols = column_blocks;
int position = 0;
bs->rows.resize(column_blocks.size(), CompressedRow(1));
@@ -269,8 +269,8 @@
// Fill the values array of the block sparse matrix.
double* values = matrix->mutable_values();
- for (int i = 0; i < column_blocks.size(); ++i) {
- const int size = column_blocks[i].size;
+ for (const auto& column_block : column_blocks) {
+ const int size = column_block.size;
for (int j = 0; j < size; ++j) {
// (j + 1) * size is compact way of accessing the (j,j) entry.
values[j * (size + 1)] = diagonal[j];
@@ -346,7 +346,7 @@
CHECK_GT(options.block_density, 0.0);
CHECK_LE(options.block_density, 1.0);
- CompressedRowBlockStructure* bs = new CompressedRowBlockStructure();
+ auto* bs = new CompressedRowBlockStructure();
if (options.col_blocks.empty()) {
CHECK_GT(options.num_col_blocks, 0);
CHECK_GT(options.min_col_block_size, 0);
@@ -360,7 +360,7 @@
const int delta_block_size =
Uniform(options.max_col_block_size - options.min_col_block_size);
const int col_block_size = options.min_col_block_size + delta_block_size;
- bs->cols.push_back(Block(col_block_size, col_block_position));
+ bs->cols.emplace_back(col_block_size, col_block_position);
col_block_position += col_block_size;
}
} else {
@@ -377,7 +377,7 @@
const int delta_block_size =
Uniform(options.max_row_block_size - options.min_row_block_size);
const int row_block_size = options.min_row_block_size + delta_block_size;
- bs->rows.push_back(CompressedRow());
+ bs->rows.emplace_back();
CompressedRow& row = bs->rows.back();
row.block.size = row_block_size;
row.block.position = row_block_position;
@@ -385,7 +385,7 @@
for (int c = 0; c < bs->cols.size(); ++c) {
if (RandDouble() > options.block_density) continue;
- row.cells.push_back(Cell());
+ row.cells.emplace_back();
Cell& cell = row.cells.back();
cell.block_id = c;
cell.position = value_position;
diff --git a/internal/ceres/block_sparse_matrix.h b/internal/ceres/block_sparse_matrix.h
index 8e555b0..75b0deb 100644
--- a/internal/ceres/block_sparse_matrix.h
+++ b/internal/ceres/block_sparse_matrix.h
@@ -141,7 +141,7 @@
// information
class CERES_NO_EXPORT BlockSparseMatrixData {
public:
- BlockSparseMatrixData(const BlockSparseMatrix& m)
+ explicit BlockSparseMatrixData(const BlockSparseMatrix& m)
: block_structure_(m.block_structure()), values_(m.values()){};
BlockSparseMatrixData(const CompressedRowBlockStructure* block_structure,
diff --git a/internal/ceres/block_sparse_matrix_test.cc b/internal/ceres/block_sparse_matrix_test.cc
index 747cdef..d8cf987 100644
--- a/internal/ceres/block_sparse_matrix_test.cc
+++ b/internal/ceres/block_sparse_matrix_test.cc
@@ -187,9 +187,9 @@
TEST(BlockSparseMatrix, CreateDiagonalMatrix) {
std::vector<Block> column_blocks;
- column_blocks.push_back(Block(2, 0));
- column_blocks.push_back(Block(1, 2));
- column_blocks.push_back(Block(3, 3));
+ column_blocks.emplace_back(2, 0);
+ column_blocks.emplace_back(1, 2);
+ column_blocks.emplace_back(3, 3);
const int num_cols =
column_blocks.back().size + column_blocks.back().position;
Vector diagonal(num_cols);
diff --git a/internal/ceres/block_structure.h b/internal/ceres/block_structure.h
index 2039664..fe7574c 100644
--- a/internal/ceres/block_structure.h
+++ b/internal/ceres/block_structure.h
@@ -46,25 +46,25 @@
namespace ceres {
namespace internal {
-typedef int32_t BlockSize;
+using BlockSize = int32_t;
struct CERES_NO_EXPORT Block {
- Block() : size(-1), position(-1) {}
+ Block() = default;
Block(int size_, int position_) : size(size_), position(position_) {}
- BlockSize size;
- int position; // Position along the row/column.
+ BlockSize size{-1};
+ int position{-1}; // Position along the row/column.
};
struct CERES_NO_EXPORT Cell {
- Cell() : block_id(-1), position(-1) {}
+ Cell() = default;
Cell(int block_id_, int position_)
: block_id(block_id_), position(position_) {}
// Column or row block id as the case maybe.
- int block_id;
+ int block_id{-1};
// Where in the values array of the jacobian is this cell located.
- int position;
+ int position{-1};
};
// Order cell by their block_id;
@@ -75,13 +75,13 @@
// Construct a CompressedList with the cells containing num_cells
// entries.
- CompressedList(int num_cells) : cells(num_cells) {}
+ explicit CompressedList(int num_cells) : cells(num_cells) {}
Block block;
std::vector<Cell> cells;
};
-typedef CompressedList CompressedRow;
-typedef CompressedList CompressedColumn;
+using CompressedRow = CompressedList;
+using CompressedColumn = CompressedList;
struct CERES_NO_EXPORT CompressedRowBlockStructure {
std::vector<Block> cols;
diff --git a/internal/ceres/bundle_adjustment_test_util.h b/internal/ceres/bundle_adjustment_test_util.h
index fe87405..44281b6 100644
--- a/internal/ceres/bundle_adjustment_test_util.h
+++ b/internal/ceres/bundle_adjustment_test_util.h
@@ -242,7 +242,7 @@
};
double BundleAdjustmentProblem::kResidualTolerance = 1e-4;
-typedef SystemTest<BundleAdjustmentProblem> BundleAdjustmentTest;
+using BundleAdjustmentTest = SystemTest<BundleAdjustmentProblem>;
} // namespace internal
} // namespace ceres
diff --git a/internal/ceres/c_api.cc b/internal/ceres/c_api.cc
index 784504f..8ea344d 100644
--- a/internal/ceres/c_api.cc
+++ b/internal/ceres/c_api.cc
@@ -145,7 +145,7 @@
int num_parameter_blocks,
int* parameter_block_sizes,
double** parameters) {
- Problem* ceres_problem = reinterpret_cast<Problem*>(problem);
+ auto* ceres_problem = reinterpret_cast<Problem*>(problem);
auto callback_cost_function =
std::make_unique<CallbackCostFunction>(cost_function,
@@ -169,7 +169,7 @@
}
void ceres_solve(ceres_problem_t* c_problem) {
- Problem* problem = reinterpret_cast<Problem*>(c_problem);
+ auto* problem = reinterpret_cast<Problem*>(c_problem);
// TODO(keir): Obviously, this way of setting options won't scale or last.
// Instead, figure out a way to specify some of the options without
diff --git a/internal/ceres/c_api_test.cc b/internal/ceres/c_api_test.cc
index 2473116..1126743 100644
--- a/internal/ceres/c_api_test.cc
+++ b/internal/ceres/c_api_test.cc
@@ -114,7 +114,7 @@
double** parameters,
double* residuals,
double** jacobians) {
- double* measurement = (double*)user_data;
+ auto* measurement = static_cast<double*>(user_data);
double x = measurement[0];
double y = measurement[1];
double m = parameters[0][0];
diff --git a/internal/ceres/canonical_views_clustering.cc b/internal/ceres/canonical_views_clustering.cc
index 68998fa..c7565c1 100644
--- a/internal/ceres/canonical_views_clustering.cc
+++ b/internal/ceres/canonical_views_clustering.cc
@@ -44,8 +44,8 @@
using std::vector;
-typedef std::unordered_map<int, int> IntMap;
-typedef std::unordered_set<int> IntSet;
+using IntMap = std::unordered_map<int, int>;
+using IntSet = std::unordered_set<int>;
class CERES_NO_EXPORT CanonicalViewsClustering {
public:
@@ -174,9 +174,9 @@
difference -= options_.size_penalty_weight;
// Orthogonality.
- for (int i = 0; i < centers.size(); ++i) {
+ for (int center : centers) {
difference -= options_.similarity_penalty_weight *
- graph_->EdgeWeight(centers[i], candidate);
+ graph_->EdgeWeight(center, candidate);
}
return difference;
diff --git a/internal/ceres/casts.h b/internal/ceres/casts.h
index 21445c8..04d8ba4 100644
--- a/internal/ceres/casts.h
+++ b/internal/ceres/casts.h
@@ -38,7 +38,7 @@
// Identity metafunction.
template <class T>
struct identity_ {
- typedef T type;
+ using type = T;
};
// Use implicit_cast as a safe version of static_cast or const_cast
diff --git a/internal/ceres/cgnr_solver.cc b/internal/ceres/cgnr_solver.cc
index e89305f..cca72bc 100644
--- a/internal/ceres/cgnr_solver.cc
+++ b/internal/ceres/cgnr_solver.cc
@@ -31,6 +31,7 @@
#include "ceres/cgnr_solver.h"
#include <memory>
+#include <utility>
#include "ceres/block_jacobi_preconditioner.h"
#include "ceres/cgnr_linear_operator.h"
@@ -44,8 +45,8 @@
namespace ceres {
namespace internal {
-CgnrSolver::CgnrSolver(const LinearSolver::Options& options)
- : options_(options) {
+CgnrSolver::CgnrSolver(LinearSolver::Options options)
+ : options_(std::move(options)) {
if (options_.preconditioner_type != JACOBI &&
options_.preconditioner_type != IDENTITY &&
options_.preconditioner_type != SUBSET) {
diff --git a/internal/ceres/cgnr_solver.h b/internal/ceres/cgnr_solver.h
index 06a6118..25e62e9 100644
--- a/internal/ceres/cgnr_solver.h
+++ b/internal/ceres/cgnr_solver.h
@@ -52,7 +52,7 @@
// block diagonal preconditioning is supported.
class CERES_NO_EXPORT CgnrSolver final : public BlockSparseMatrixSolver {
public:
- explicit CgnrSolver(const LinearSolver::Options& options);
+ explicit CgnrSolver(LinearSolver::Options options);
CgnrSolver(const CgnrSolver&) = delete;
void operator=(const CgnrSolver&) = delete;
~CgnrSolver() override;
diff --git a/internal/ceres/compressed_row_jacobian_writer.cc b/internal/ceres/compressed_row_jacobian_writer.cc
index c94d719..55b30a2 100644
--- a/internal/ceres/compressed_row_jacobian_writer.cc
+++ b/internal/ceres/compressed_row_jacobian_writer.cc
@@ -1,5 +1,5 @@
// Ceres Solver - A fast non-linear least squares minimizer
-// Copyright 2015 Google Inc. All rights reserved.
+// Copyright 2022 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
@@ -97,8 +97,7 @@
// Count the number of jacobian nonzeros.
int num_jacobian_nonzeros = 0;
- for (int i = 0; i < residual_blocks.size(); ++i) {
- ResidualBlock* residual_block = residual_blocks[i];
+ for (auto* residual_block : residual_blocks) {
const int num_residuals = residual_block->NumResiduals();
const int num_parameter_blocks = residual_block->NumParameterBlocks();
for (int j = 0; j < num_parameter_blocks; ++j) {
@@ -126,8 +125,7 @@
int row_pos = 0;
rows[0] = 0;
- for (int i = 0; i < residual_blocks.size(); ++i) {
- const ResidualBlock* residual_block = residual_blocks[i];
+ for (auto* residual_block : residual_blocks) {
const int num_parameter_blocks = residual_block->NumParameterBlocks();
// Count the number of derivatives for a row of this residual block and
@@ -169,9 +167,9 @@
// parameter vector. This code mirrors that in Write(), where jacobian
// values are updated.
int col_pos = 0;
- for (int j = 0; j < parameter_indices.size(); ++j) {
+ for (int parameter_index : parameter_indices) {
ParameterBlock* parameter_block =
- program_->parameter_blocks()[parameter_indices[j]];
+ program_->parameter_blocks()[parameter_index];
const int parameter_block_size = parameter_block->TangentSize();
for (int r = 0; r < num_residuals; ++r) {
@@ -198,8 +196,7 @@
int residual_offset,
double** jacobians,
SparseMatrix* base_jacobian) {
- CompressedRowSparseMatrix* jacobian =
- down_cast<CompressedRowSparseMatrix*>(base_jacobian);
+ auto* jacobian = down_cast<CompressedRowSparseMatrix*>(base_jacobian);
double* jacobian_values = jacobian->mutable_values();
const int* jacobian_rows = jacobian->rows();
@@ -216,10 +213,10 @@
// Iterate over the jacobian blocks in increasing order of their
// positions in the reduced parameter vector.
- for (int i = 0; i < evaluated_jacobian_blocks.size(); ++i) {
+ for (auto& evaluated_jacobian_block : evaluated_jacobian_blocks) {
const ParameterBlock* parameter_block =
- program_->parameter_blocks()[evaluated_jacobian_blocks[i].first];
- const int argument = evaluated_jacobian_blocks[i].second;
+ program_->parameter_blocks()[evaluated_jacobian_block.first];
+ const int argument = evaluated_jacobian_block.second;
const int parameter_block_size = parameter_block->TangentSize();
// Copy one row of the jacobian block at a time.
diff --git a/internal/ceres/compressed_row_sparse_matrix.cc b/internal/ceres/compressed_row_sparse_matrix.cc
index d8743cb..db103d9 100644
--- a/internal/ceres/compressed_row_sparse_matrix.cc
+++ b/internal/ceres/compressed_row_sparse_matrix.cc
@@ -1,5 +1,5 @@
// Ceres Solver - A fast non-linear least squares minimizer
-// Copyright 2017 Google Inc. All rights reserved.
+// Copyright 2022 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
@@ -542,9 +542,9 @@
const double* diagonal, const vector<int>& blocks) {
int num_rows = 0;
int num_nonzeros = 0;
- for (int i = 0; i < blocks.size(); ++i) {
- num_rows += blocks[i];
- num_nonzeros += blocks[i] * blocks[i];
+ for (int block_size : blocks) {
+ num_rows += block_size;
+ num_nonzeros += block_size * block_size;
}
std::unique_ptr<CompressedRowSparseMatrix> matrix =
@@ -558,8 +558,7 @@
int idx_cursor = 0;
int col_cursor = 0;
- for (int i = 0; i < blocks.size(); ++i) {
- const int block_size = blocks[i];
+ for (int block_size : blocks) {
for (int r = 0; r < block_size; ++r) {
*(rows++) = idx_cursor;
values[idx_cursor + r] = diagonal[col_cursor + r];
diff --git a/internal/ceres/compressed_row_sparse_matrix_test.cc b/internal/ceres/compressed_row_sparse_matrix_test.cc
index 3a2768c..e898a5a 100644
--- a/internal/ceres/compressed_row_sparse_matrix_test.cc
+++ b/internal/ceres/compressed_row_sparse_matrix_test.cc
@@ -381,7 +381,7 @@
}
}
-typedef ::testing::tuple<CompressedRowSparseMatrix::StorageType> Param;
+using Param = ::testing::tuple<CompressedRowSparseMatrix::StorageType>;
static std::string ParamInfoToString(testing::TestParamInfo<Param> info) {
if (::testing::get<0>(info.param) ==
diff --git a/internal/ceres/concurrent_queue.h b/internal/ceres/concurrent_queue.h
index a04d147..1e74153 100644
--- a/internal/ceres/concurrent_queue.h
+++ b/internal/ceres/concurrent_queue.h
@@ -78,7 +78,7 @@
class ConcurrentQueue {
public:
// Defaults the queue to blocking on Wait calls.
- ConcurrentQueue() : wait_(true) {}
+ ConcurrentQueue() = default;
// Atomically push an element onto the queue. If a thread was waiting for an
// element, wake it up.
@@ -149,7 +149,7 @@
std::queue<T> queue_;
// If true, signals that callers of Wait will block waiting to pop an
// element off the queue.
- bool wait_;
+ bool wait_{true};
};
} // namespace internal
diff --git a/internal/ceres/conditioned_cost_function_test.cc b/internal/ceres/conditioned_cost_function_test.cc
index f21f84c..d0d5d2d 100644
--- a/internal/ceres/conditioned_cost_function_test.cc
+++ b/internal/ceres/conditioned_cost_function_test.cc
@@ -85,7 +85,7 @@
VectorRef v2_vector(v2, kTestCostFunctionSize, 1);
Matrix identity(kTestCostFunctionSize, kTestCostFunctionSize);
identity.setIdentity();
- NormalPrior* difference_cost_function = new NormalPrior(identity, v2_vector);
+ auto* difference_cost_function = new NormalPrior(identity, v2_vector);
std::vector<CostFunction*> conditioners;
for (int i = 0; i < kTestCostFunctionSize; i++) {
@@ -127,7 +127,7 @@
VectorRef v2_vector(v2, kTestCostFunctionSize, 1);
Matrix identity =
Matrix::Identity(kTestCostFunctionSize, kTestCostFunctionSize);
- NormalPrior* difference_cost_function = new NormalPrior(identity, v2_vector);
+ auto* difference_cost_function = new NormalPrior(identity, v2_vector);
CostFunction* conditioner = new LinearCostFunction(2, 7);
std::vector<CostFunction*> conditioners;
for (int i = 0; i < kTestCostFunctionSize; i++) {
diff --git a/internal/ceres/conjugate_gradients_solver.cc b/internal/ceres/conjugate_gradients_solver.cc
index 436a5b1..62ae920 100644
--- a/internal/ceres/conjugate_gradients_solver.cc
+++ b/internal/ceres/conjugate_gradients_solver.cc
@@ -41,6 +41,7 @@
#include <cmath>
#include <cstddef>
+#include <utility>
#include "ceres/internal/eigen.h"
#include "ceres/linear_operator.h"
@@ -57,8 +58,8 @@
} // namespace
ConjugateGradientsSolver::ConjugateGradientsSolver(
- const LinearSolver::Options& options)
- : options_(options) {}
+ LinearSolver::Options options)
+ : options_(std::move(options)) {}
LinearSolver::Summary ConjugateGradientsSolver::Solve(
LinearOperator* A,
diff --git a/internal/ceres/conjugate_gradients_solver.h b/internal/ceres/conjugate_gradients_solver.h
index 418508a..99ddb5d 100644
--- a/internal/ceres/conjugate_gradients_solver.h
+++ b/internal/ceres/conjugate_gradients_solver.h
@@ -58,7 +58,7 @@
// LinearSolver::PerSolveOptions::q_tolerance in linear_solver.h.
class CERES_NO_EXPORT ConjugateGradientsSolver final : public LinearSolver {
public:
- explicit ConjugateGradientsSolver(const LinearSolver::Options& options);
+ explicit ConjugateGradientsSolver(LinearSolver::Options options);
Summary Solve(LinearOperator* A,
const double* b,
const LinearSolver::PerSolveOptions& per_solve_options,
diff --git a/internal/ceres/coordinate_descent_minimizer.cc b/internal/ceres/coordinate_descent_minimizer.cc
index 86ab838..a6e149d 100644
--- a/internal/ceres/coordinate_descent_minimizer.cc
+++ b/internal/ceres/coordinate_descent_minimizer.cc
@@ -1,5 +1,5 @@
// Ceres Solver - A fast non-linear least squares minimizer
-// Copyright 2015 Google Inc. All rights reserved.
+// Copyright 2022 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
@@ -94,9 +94,9 @@
// assign zero offsets/empty independent sets to these parameter
// blocks.
const vector<ParameterBlock*>& parameter_blocks = program.parameter_blocks();
- for (int i = 0; i < parameter_blocks.size(); ++i) {
- if (!ordering.IsMember(parameter_blocks[i]->mutable_user_state())) {
- parameter_blocks_.push_back(parameter_blocks[i]);
+ for (auto* parameter_block : parameter_blocks) {
+ if (!ordering.IsMember(parameter_block->mutable_user_state())) {
+ parameter_blocks_.push_back(parameter_block);
independent_set_offsets_.push_back(independent_set_offsets_.back());
}
}
@@ -105,8 +105,7 @@
// block.
residual_blocks_.resize(parameter_block_index.size());
const vector<ResidualBlock*>& residual_blocks = program.residual_blocks();
- for (int i = 0; i < residual_blocks.size(); ++i) {
- ResidualBlock* residual_block = residual_blocks[i];
+ for (auto* residual_block : residual_blocks) {
const int num_parameter_blocks = residual_block->NumParameterBlocks();
for (int j = 0; j < num_parameter_blocks; ++j) {
ParameterBlock* parameter_block = residual_block->parameter_blocks()[j];
@@ -129,8 +128,7 @@
double* parameters,
Solver::Summary* summary) {
// Set the state and mark all parameter blocks constant.
- for (int i = 0; i < parameter_blocks_.size(); ++i) {
- ParameterBlock* parameter_block = parameter_blocks_[i];
+ for (auto* parameter_block : parameter_blocks_) {
parameter_block->SetState(parameters + parameter_block->state_offset());
parameter_block->SetConstant();
}
@@ -202,8 +200,8 @@
});
}
- for (int i = 0; i < parameter_blocks_.size(); ++i) {
- parameter_blocks_[i]->SetVarying();
+ for (auto* parameter_block : parameter_blocks_) {
+ parameter_block->SetVarying();
}
// for (int i = 0; i < options.num_threads; ++i) {
diff --git a/internal/ceres/corrector_test.cc b/internal/ceres/corrector_test.cc
index 951041e..d2f7c84 100644
--- a/internal/ceres/corrector_test.cc
+++ b/internal/ceres/corrector_test.cc
@@ -1,5 +1,5 @@
// Ceres Solver - A fast non-linear least squares minimizer
-// Copyright 2015 Google Inc. All rights reserved.
+// Copyright 2022 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
@@ -164,8 +164,8 @@
srand(5);
for (int iter = 0; iter < 10000; ++iter) {
// Initialize the jacobian and residual.
- for (int i = 0; i < 2 * 3; ++i) jacobian[i] = RandDouble();
- for (int i = 0; i < 3; ++i) residuals[i] = RandDouble();
+ for (double& jacobian_entry : jacobian) jacobian_entry = RandDouble();
+ for (double& residual : residuals) residual = RandDouble();
const double sq_norm = res.dot(res);
@@ -230,7 +230,7 @@
srand(5);
for (int iter = 0; iter < 10000; ++iter) {
// Initialize the jacobian.
- for (int i = 0; i < 2 * 3; ++i) jacobian[i] = RandDouble();
+ for (double& jacobian_entry : jacobian) jacobian_entry = RandDouble();
// Zero residuals
res.setZero();
diff --git a/internal/ceres/cost_function_to_functor_test.cc b/internal/ceres/cost_function_to_functor_test.cc
index 80efd66..dfa1a92 100644
--- a/internal/ceres/cost_function_to_functor_test.cc
+++ b/internal/ceres/cost_function_to_functor_test.cc
@@ -376,10 +376,9 @@
}
TEST(CostFunctionToFunctor, DynamicCostFunctionToFunctor) {
- DynamicAutoDiffCostFunction<DynamicTwoParameterBlockFunctor>*
- actual_cost_function(
- new DynamicAutoDiffCostFunction<DynamicTwoParameterBlockFunctor>(
- new DynamicTwoParameterBlockFunctor));
+ auto* actual_cost_function(
+ new DynamicAutoDiffCostFunction<DynamicTwoParameterBlockFunctor>(
+ new DynamicTwoParameterBlockFunctor));
actual_cost_function->AddParameterBlock(2);
actual_cost_function->AddParameterBlock(2);
actual_cost_function->SetNumResiduals(2);
diff --git a/internal/ceres/covariance_impl.cc b/internal/ceres/covariance_impl.cc
index 5075609..324b553 100644
--- a/internal/ceres/covariance_impl.cc
+++ b/internal/ceres/covariance_impl.cc
@@ -1,5 +1,5 @@
// Ceres Solver - A fast non-linear least squares minimizer
-// Copyright 2015 Google Inc. All rights reserved.
+// Copyright 2022 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
@@ -84,8 +84,7 @@
template <typename T>
void CheckForDuplicates(std::vector<T> blocks) {
std::sort(blocks.begin(), blocks.end());
- typename std::vector<T>::iterator it =
- std::adjacent_find(blocks.begin(), blocks.end());
+ auto it = std::adjacent_find(blocks.begin(), blocks.end());
if (it != blocks.end()) {
// In case there are duplicates, we search for their location.
std::map<T, std::vector<int>> blocks_map;
@@ -384,8 +383,7 @@
std::vector<ResidualBlock*> residual_blocks;
problem->GetResidualBlocks(&residual_blocks);
- for (int i = 0; i < residual_blocks.size(); ++i) {
- ResidualBlock* residual_block = residual_blocks[i];
+ for (auto* residual_block : residual_blocks) {
parameter_blocks_in_use.insert(residual_block->parameter_blocks(),
residual_block->parameter_blocks() +
residual_block->NumParameterBlocks());
@@ -395,8 +393,7 @@
std::vector<double*>& active_parameter_blocks =
evaluate_options_.parameter_blocks;
active_parameter_blocks.clear();
- for (int i = 0; i < all_parameter_blocks.size(); ++i) {
- double* parameter_block = all_parameter_blocks[i];
+ for (auto* parameter_block : all_parameter_blocks) {
ParameterBlock* block = FindOrDie(parameter_map, parameter_block);
if (!block->IsConstant() && (parameter_blocks_in_use.count(block) > 0)) {
active_parameter_blocks.push_back(parameter_block);
@@ -412,8 +409,7 @@
// ordering of parameter blocks just constructed.
int num_rows = 0;
parameter_block_to_row_index_.clear();
- for (int i = 0; i < active_parameter_blocks.size(); ++i) {
- double* parameter_block = active_parameter_blocks[i];
+ for (auto* parameter_block : active_parameter_blocks) {
const int parameter_block_size =
problem->ParameterBlockTangentSize(parameter_block);
parameter_block_to_row_index_[parameter_block] = num_rows;
@@ -425,9 +421,7 @@
// triangular part of the matrix.
int num_nonzeros = 0;
CovarianceBlocks covariance_blocks;
- for (int i = 0; i < original_covariance_blocks.size(); ++i) {
- const std::pair<const double*, const double*>& block_pair =
- original_covariance_blocks[i];
+ for (const auto& block_pair : original_covariance_blocks) {
if (constant_parameter_blocks_.count(block_pair.first) > 0 ||
constant_parameter_blocks_.count(block_pair.second) > 0) {
continue;
@@ -823,7 +817,7 @@
problem_->Evaluate(evaluate_options_, nullptr, nullptr, nullptr, &jacobian);
event_logger.AddEvent("Evaluate");
- typedef Eigen::SparseMatrix<double, Eigen::ColMajor> EigenSparseMatrix;
+ using EigenSparseMatrix = Eigen::SparseMatrix<double, Eigen::ColMajor>;
// Convert the matrix to column major order as required by SparseQR.
EigenSparseMatrix sparse_jacobian =
diff --git a/internal/ceres/covariance_test.cc b/internal/ceres/covariance_test.cc
index 20b480f..7854be3 100644
--- a/internal/ceres/covariance_test.cc
+++ b/internal/ceres/covariance_test.cc
@@ -185,12 +185,12 @@
// clang-format on
vector<pair<const double*, const double*>> covariance_blocks;
- covariance_blocks.push_back(make_pair(block1, block1));
- covariance_blocks.push_back(make_pair(block4, block4));
- covariance_blocks.push_back(make_pair(block2, block2));
- covariance_blocks.push_back(make_pair(block3, block3));
- covariance_blocks.push_back(make_pair(block2, block3));
- covariance_blocks.push_back(make_pair(block4, block1)); // reversed
+ covariance_blocks.emplace_back(block1, block1);
+ covariance_blocks.emplace_back(block4, block4);
+ covariance_blocks.emplace_back(block2, block2);
+ covariance_blocks.emplace_back(block3, block3);
+ covariance_blocks.emplace_back(block2, block3);
+ covariance_blocks.emplace_back(block4, block1); // reversed
Covariance::Options options;
CovarianceImpl covariance_impl(options);
@@ -266,12 +266,12 @@
// clang-format on
vector<pair<const double*, const double*>> covariance_blocks;
- covariance_blocks.push_back(make_pair(block1, block1));
- covariance_blocks.push_back(make_pair(block4, block4));
- covariance_blocks.push_back(make_pair(block2, block2));
- covariance_blocks.push_back(make_pair(block3, block3));
- covariance_blocks.push_back(make_pair(block2, block3));
- covariance_blocks.push_back(make_pair(block4, block1)); // reversed
+ covariance_blocks.emplace_back(block1, block1);
+ covariance_blocks.emplace_back(block4, block4);
+ covariance_blocks.emplace_back(block2, block2);
+ covariance_blocks.emplace_back(block3, block3);
+ covariance_blocks.emplace_back(block2, block3);
+ covariance_blocks.emplace_back(block4, block1); // reversed
Covariance::Options options;
CovarianceImpl covariance_impl(options);
@@ -345,12 +345,12 @@
// clang-format on
vector<pair<const double*, const double*>> covariance_blocks;
- covariance_blocks.push_back(make_pair(block1, block1));
- covariance_blocks.push_back(make_pair(block4, block4));
- covariance_blocks.push_back(make_pair(block2, block2));
- covariance_blocks.push_back(make_pair(block3, block3));
- covariance_blocks.push_back(make_pair(block2, block3));
- covariance_blocks.push_back(make_pair(block4, block1)); // reversed
+ covariance_blocks.emplace_back(block1, block1);
+ covariance_blocks.emplace_back(block4, block4);
+ covariance_blocks.emplace_back(block2, block2);
+ covariance_blocks.emplace_back(block3, block3);
+ covariance_blocks.emplace_back(block2, block3);
+ covariance_blocks.emplace_back(block4, block1); // reversed
Covariance::Options options;
CovarianceImpl covariance_impl(options);
@@ -432,7 +432,7 @@
class CovarianceTest : public ::testing::Test {
protected:
- typedef map<const double*, pair<int, int>> BoundsMap;
+ using BoundsMap = map<const double*, pair<int, int>>;
void SetUp() override {
double* x = parameters_;
@@ -478,12 +478,12 @@
new BinaryCostFunction(1, 1, 2, jacobian1, jacobian2), nullptr, z, x);
}
- all_covariance_blocks_.push_back(make_pair(x, x));
- all_covariance_blocks_.push_back(make_pair(y, y));
- all_covariance_blocks_.push_back(make_pair(z, z));
- all_covariance_blocks_.push_back(make_pair(x, y));
- all_covariance_blocks_.push_back(make_pair(x, z));
- all_covariance_blocks_.push_back(make_pair(y, z));
+ all_covariance_blocks_.emplace_back(x, x);
+ all_covariance_blocks_.emplace_back(y, y);
+ all_covariance_blocks_.emplace_back(z, z);
+ all_covariance_blocks_.emplace_back(x, y);
+ all_covariance_blocks_.emplace_back(x, z);
+ all_covariance_blocks_.emplace_back(y, z);
column_bounds_[x] = make_pair(0, 2);
column_bounds_[y] = make_pair(2, 5);
@@ -543,9 +543,9 @@
Covariance covariance(options);
EXPECT_TRUE(covariance.Compute(covariance_blocks, &problem_));
- for (int i = 0; i < covariance_blocks.size(); ++i) {
- const double* block1 = covariance_blocks[i].first;
- const double* block2 = covariance_blocks[i].second;
+ for (auto& covariance_block : covariance_blocks) {
+ const double* block1 = covariance_block.first;
+ const double* block2 = covariance_block.second;
// block1, block2
GetCovarianceBlockAndCompare(block1,
block2,
@@ -1350,10 +1350,10 @@
"Covariance::Compute called with duplicate blocks "
"at indices \\(0, 1\\) and \\(2, 3\\)");
vector<pair<const double*, const double*>> covariance_blocks;
- covariance_blocks.push_back(make_pair(x, x));
- covariance_blocks.push_back(make_pair(x, x));
- covariance_blocks.push_back(make_pair(y, y));
- covariance_blocks.push_back(make_pair(y, y));
+ covariance_blocks.emplace_back(x, x);
+ covariance_blocks.emplace_back(x, x);
+ covariance_blocks.emplace_back(y, y);
+ covariance_blocks.emplace_back(y, y);
EXPECT_DEATH_IF_SUPPORTED(covariance.Compute(covariance_blocks, &problem_),
"Covariance::Compute called with duplicate blocks "
"at indices \\(0, 1\\) and \\(2, 3\\)");
@@ -1398,12 +1398,12 @@
new BinaryCostFunction(1, 1, 2, jacobian1, jacobian2), nullptr, z, x);
}
- all_covariance_blocks_.push_back(make_pair(x, x));
- all_covariance_blocks_.push_back(make_pair(y, y));
- all_covariance_blocks_.push_back(make_pair(z, z));
- all_covariance_blocks_.push_back(make_pair(x, y));
- all_covariance_blocks_.push_back(make_pair(x, z));
- all_covariance_blocks_.push_back(make_pair(y, z));
+ all_covariance_blocks_.emplace_back(x, x);
+ all_covariance_blocks_.emplace_back(y, y);
+ all_covariance_blocks_.emplace_back(z, z);
+ all_covariance_blocks_.emplace_back(x, y);
+ all_covariance_blocks_.emplace_back(x, z);
+ all_covariance_blocks_.emplace_back(y, z);
column_bounds_[x] = make_pair(0, 2);
column_bounds_[y] = make_pair(2, 5);
@@ -1475,10 +1475,10 @@
options.algorithm_type = DENSE_SVD;
Covariance covariance(options);
vector<pair<const double*, const double*>> covariance_blocks;
- covariance_blocks.push_back(std::make_pair(&x, &x));
- covariance_blocks.push_back(std::make_pair(&x, &y));
- covariance_blocks.push_back(std::make_pair(&y, &x));
- covariance_blocks.push_back(std::make_pair(&y, &y));
+ covariance_blocks.emplace_back(&x, &x);
+ covariance_blocks.emplace_back(&x, &y);
+ covariance_blocks.emplace_back(&y, &x);
+ covariance_blocks.emplace_back(&y, &y);
EXPECT_TRUE(covariance.Compute(covariance_blocks, &problem));
double value = -1;
@@ -1510,10 +1510,10 @@
options.algorithm_type = DENSE_SVD;
Covariance covariance(options);
vector<pair<const double*, const double*>> covariance_blocks;
- covariance_blocks.push_back(std::make_pair(&x, &x));
- covariance_blocks.push_back(std::make_pair(&x, &y));
- covariance_blocks.push_back(std::make_pair(&y, &x));
- covariance_blocks.push_back(std::make_pair(&y, &y));
+ covariance_blocks.emplace_back(&x, &x);
+ covariance_blocks.emplace_back(&x, &y);
+ covariance_blocks.emplace_back(&y, &x);
+ covariance_blocks.emplace_back(&y, &y);
EXPECT_TRUE(covariance.Compute(covariance_blocks, &problem));
double value = -1;
@@ -1543,10 +1543,10 @@
options.algorithm_type = DENSE_SVD;
Covariance covariance(options);
vector<pair<const double*, const double*>> covariance_blocks;
- covariance_blocks.push_back(std::make_pair(&x, &x));
- covariance_blocks.push_back(std::make_pair(&x, &y));
- covariance_blocks.push_back(std::make_pair(&y, &x));
- covariance_blocks.push_back(std::make_pair(&y, &y));
+ covariance_blocks.emplace_back(&x, &x);
+ covariance_blocks.emplace_back(&x, &y);
+ covariance_blocks.emplace_back(&y, &x);
+ covariance_blocks.emplace_back(&y, &y);
EXPECT_TRUE(covariance.Compute(covariance_blocks, &problem));
double value = -1;
@@ -1578,10 +1578,10 @@
options.algorithm_type = DENSE_SVD;
Covariance covariance(options);
vector<pair<const double*, const double*>> covariance_blocks;
- covariance_blocks.push_back(std::make_pair(&x, &x));
- covariance_blocks.push_back(std::make_pair(&x, &y));
- covariance_blocks.push_back(std::make_pair(&y, &x));
- covariance_blocks.push_back(std::make_pair(&y, &y));
+ covariance_blocks.emplace_back(&x, &x);
+ covariance_blocks.emplace_back(&x, &y);
+ covariance_blocks.emplace_back(&y, &x);
+ covariance_blocks.emplace_back(&y, &y);
EXPECT_TRUE(covariance.Compute(covariance_blocks, &problem));
double value = -1;
@@ -1620,7 +1620,7 @@
block_i);
for (int j = i; j < num_parameter_blocks_; ++j) {
double* block_j = parameters_.get() + j * parameter_block_size_;
- all_covariance_blocks_.push_back(make_pair(block_i, block_j));
+ all_covariance_blocks_.emplace_back(block_i, block_j);
}
}
}
diff --git a/internal/ceres/cxsparse.cc b/internal/ceres/cxsparse.cc
index 7aa39fa..b1eb205 100644
--- a/internal/ceres/cxsparse.cc
+++ b/internal/ceres/cxsparse.cc
@@ -127,7 +127,7 @@
vector<int> scalar_ordering;
BlockOrderingToScalarOrdering(row_blocks, block_ordering, &scalar_ordering);
- cs_dis* symbolic_factor =
+ auto* symbolic_factor =
reinterpret_cast<cs_dis*>(cs_calloc(1, sizeof(cs_dis)));
symbolic_factor->pinv = cs_pinv(&scalar_ordering[0], A->n);
cs* permuted_A = cs_symperm(A, symbolic_factor->pinv, 0);
@@ -139,7 +139,7 @@
cs_free(postordering);
cs_spfree(permuted_A);
- symbolic_factor->cp = (int*)cs_malloc(A->n + 1, sizeof(int));
+ symbolic_factor->cp = static_cast<int*>(cs_malloc(A->n + 1, sizeof(int)));
symbolic_factor->lnz = cs_cumsum(symbolic_factor->cp, column_counts, A->n);
symbolic_factor->unz = symbolic_factor->lnz;
diff --git a/internal/ceres/cxsparse.h b/internal/ceres/cxsparse.h
index 74135f9..97fc045 100644
--- a/internal/ceres/cxsparse.h
+++ b/internal/ceres/cxsparse.h
@@ -154,7 +154,7 @@
std::string* message) final;
private:
- CXSparseCholesky(const OrderingType ordering_type);
+ explicit CXSparseCholesky(const OrderingType ordering_type);
void FreeSymbolicFactorization();
void FreeNumericFactorization();
diff --git a/internal/ceres/dense_cholesky_test.cc b/internal/ceres/dense_cholesky_test.cc
index 034206a..0e84207 100644
--- a/internal/ceres/dense_cholesky_test.cc
+++ b/internal/ceres/dense_cholesky_test.cc
@@ -45,7 +45,7 @@
namespace ceres {
namespace internal {
-typedef DenseLinearAlgebraLibraryType Param;
+using Param = DenseLinearAlgebraLibraryType;
namespace {
diff --git a/internal/ceres/dense_linear_solver_test.cc b/internal/ceres/dense_linear_solver_test.cc
index 8110d8d..9a9dd2d 100644
--- a/internal/ceres/dense_linear_solver_test.cc
+++ b/internal/ceres/dense_linear_solver_test.cc
@@ -42,9 +42,8 @@
namespace ceres {
namespace internal {
-typedef ::testing::
- tuple<LinearSolverType, DenseLinearAlgebraLibraryType, bool, int>
- Param;
+using Param = ::testing::
+ tuple<LinearSolverType, DenseLinearAlgebraLibraryType, bool, int>;
static std::string ParamInfoToString(testing::TestParamInfo<Param> info) {
Param param = info.param;
diff --git a/internal/ceres/dense_normal_cholesky_solver.cc b/internal/ceres/dense_normal_cholesky_solver.cc
index 52bf1b8..30a0c02 100644
--- a/internal/ceres/dense_normal_cholesky_solver.cc
+++ b/internal/ceres/dense_normal_cholesky_solver.cc
@@ -30,6 +30,8 @@
#include "ceres/dense_normal_cholesky_solver.h"
+#include <utility>
+
#include "Eigen/Dense"
#include "ceres/dense_sparse_matrix.h"
#include "ceres/internal/eigen.h"
@@ -41,8 +43,9 @@
namespace internal {
DenseNormalCholeskySolver::DenseNormalCholeskySolver(
- const LinearSolver::Options& options)
- : options_(options), cholesky_(DenseCholesky::Create(options_)) {}
+ LinearSolver::Options options)
+ : options_(std::move(options)),
+ cholesky_(DenseCholesky::Create(options_)) {}
LinearSolver::Summary DenseNormalCholeskySolver::SolveImpl(
DenseSparseMatrix* A,
diff --git a/internal/ceres/dense_normal_cholesky_solver.h b/internal/ceres/dense_normal_cholesky_solver.h
index 3959436..5b3c740 100644
--- a/internal/ceres/dense_normal_cholesky_solver.h
+++ b/internal/ceres/dense_normal_cholesky_solver.h
@@ -81,7 +81,7 @@
class CERES_NO_EXPORT DenseNormalCholeskySolver
: public DenseSparseMatrixSolver {
public:
- explicit DenseNormalCholeskySolver(const LinearSolver::Options& options);
+ explicit DenseNormalCholeskySolver(LinearSolver::Options options);
private:
LinearSolver::Summary SolveImpl(
diff --git a/internal/ceres/dense_qr_test.cc b/internal/ceres/dense_qr_test.cc
index f796186..402a7e2 100644
--- a/internal/ceres/dense_qr_test.cc
+++ b/internal/ceres/dense_qr_test.cc
@@ -46,7 +46,7 @@
namespace ceres {
namespace internal {
-typedef DenseLinearAlgebraLibraryType Param;
+using Param = DenseLinearAlgebraLibraryType;
namespace {
diff --git a/internal/ceres/dense_sparse_matrix.cc b/internal/ceres/dense_sparse_matrix.cc
index 9e6979d..8b967f2 100644
--- a/internal/ceres/dense_sparse_matrix.cc
+++ b/internal/ceres/dense_sparse_matrix.cc
@@ -31,6 +31,7 @@
#include "ceres/dense_sparse_matrix.h"
#include <algorithm>
+#include <utility>
#include "ceres/internal/eigen.h"
#include "ceres/internal/export.h"
@@ -55,7 +56,7 @@
}
}
-DenseSparseMatrix::DenseSparseMatrix(const Matrix& m) : m_(m) {}
+DenseSparseMatrix::DenseSparseMatrix(Matrix m) : m_(std::move(m)) {}
void DenseSparseMatrix::SetZero() { m_.setZero(); }
diff --git a/internal/ceres/dense_sparse_matrix.h b/internal/ceres/dense_sparse_matrix.h
index 9f0835b..655cbb8 100644
--- a/internal/ceres/dense_sparse_matrix.h
+++ b/internal/ceres/dense_sparse_matrix.h
@@ -1,5 +1,5 @@
// Ceres Solver - A fast non-linear least squares minimizer
-// Copyright 2015 Google Inc. All rights reserved.
+// Copyright 2022 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
@@ -49,7 +49,7 @@
// Build a matrix with the same content as the TripletSparseMatrix
// m. This assumes that m does not have any repeated entries.
explicit DenseSparseMatrix(const TripletSparseMatrix& m);
- explicit DenseSparseMatrix(const Matrix& m);
+ explicit DenseSparseMatrix(Matrix m);
DenseSparseMatrix(int num_rows, int num_cols);
// SparseMatrix interface.
diff --git a/internal/ceres/detect_structure_test.cc b/internal/ceres/detect_structure_test.cc
index 8f9c5ed..3cf95cf 100644
--- a/internal/ceres/detect_structure_test.cc
+++ b/internal/ceres/detect_structure_test.cc
@@ -45,34 +45,34 @@
CompressedRowBlockStructure bs;
- bs.cols.push_back(Block());
+ bs.cols.emplace_back();
bs.cols.back().size = 3;
bs.cols.back().position = 0;
- bs.cols.push_back(Block());
+ bs.cols.emplace_back();
bs.cols.back().size = 4;
bs.cols.back().position = 3;
- bs.cols.push_back(Block());
+ bs.cols.emplace_back();
bs.cols.back().size = 4;
bs.cols.back().position = 7;
{
- bs.rows.push_back(CompressedRow());
+ bs.rows.emplace_back();
CompressedRow& row = bs.rows.back();
row.block.size = 2;
row.block.position = 0;
- row.cells.push_back(Cell(0, 0));
- row.cells.push_back(Cell(1, 0));
+ row.cells.emplace_back(0, 0);
+ row.cells.emplace_back(1, 0);
}
{
- bs.rows.push_back(CompressedRow());
+ bs.rows.emplace_back();
CompressedRow& row = bs.rows.back();
row.block.size = 2;
row.block.position = 2;
- row.cells.push_back(Cell(0, 0));
- row.cells.push_back(Cell(2, 0));
+ row.cells.emplace_back(0, 0);
+ row.cells.emplace_back(2, 0);
}
int row_block_size = 0;
@@ -94,34 +94,34 @@
CompressedRowBlockStructure bs;
- bs.cols.push_back(Block());
+ bs.cols.emplace_back();
bs.cols.back().size = 3;
bs.cols.back().position = 0;
- bs.cols.push_back(Block());
+ bs.cols.emplace_back();
bs.cols.back().size = 4;
bs.cols.back().position = 3;
- bs.cols.push_back(Block());
+ bs.cols.emplace_back();
bs.cols.back().size = 4;
bs.cols.back().position = 7;
{
- bs.rows.push_back(CompressedRow());
+ bs.rows.emplace_back();
CompressedRow& row = bs.rows.back();
row.block.size = 2;
row.block.position = 0;
- row.cells.push_back(Cell(0, 0));
- row.cells.push_back(Cell(1, 0));
+ row.cells.emplace_back(0, 0);
+ row.cells.emplace_back(1, 0);
}
{
- bs.rows.push_back(CompressedRow());
+ bs.rows.emplace_back();
CompressedRow& row = bs.rows.back();
row.block.size = 1;
row.block.position = 2;
- row.cells.push_back(Cell(0, 0));
- row.cells.push_back(Cell(2, 0));
+ row.cells.emplace_back(0, 0);
+ row.cells.emplace_back(2, 0);
}
int row_block_size = 0;
@@ -143,34 +143,34 @@
CompressedRowBlockStructure bs;
- bs.cols.push_back(Block());
+ bs.cols.emplace_back();
bs.cols.back().size = 3;
bs.cols.back().position = 0;
- bs.cols.push_back(Block());
+ bs.cols.emplace_back();
bs.cols.back().size = 4;
bs.cols.back().position = 3;
- bs.cols.push_back(Block());
+ bs.cols.emplace_back();
bs.cols.back().size = 3;
bs.cols.back().position = 7;
{
- bs.rows.push_back(CompressedRow());
+ bs.rows.emplace_back();
CompressedRow& row = bs.rows.back();
row.block.size = 2;
row.block.position = 0;
- row.cells.push_back(Cell(0, 0));
- row.cells.push_back(Cell(1, 0));
+ row.cells.emplace_back(0, 0);
+ row.cells.emplace_back(1, 0);
}
{
- bs.rows.push_back(CompressedRow());
+ bs.rows.emplace_back();
CompressedRow& row = bs.rows.back();
row.block.size = 2;
row.block.position = 2;
- row.cells.push_back(Cell(0, 0));
- row.cells.push_back(Cell(2, 0));
+ row.cells.emplace_back(0, 0);
+ row.cells.emplace_back(2, 0);
}
int row_block_size = 0;
@@ -192,34 +192,34 @@
CompressedRowBlockStructure bs;
- bs.cols.push_back(Block());
+ bs.cols.emplace_back();
bs.cols.back().size = 3;
bs.cols.back().position = 0;
- bs.cols.push_back(Block());
+ bs.cols.emplace_back();
bs.cols.back().size = 4;
bs.cols.back().position = 3;
- bs.cols.push_back(Block());
+ bs.cols.emplace_back();
bs.cols.back().size = 3;
bs.cols.back().position = 7;
{
- bs.rows.push_back(CompressedRow());
+ bs.rows.emplace_back();
CompressedRow& row = bs.rows.back();
row.block.size = 2;
row.block.position = 0;
- row.cells.push_back(Cell(0, 0));
- row.cells.push_back(Cell(2, 0));
+ row.cells.emplace_back(0, 0);
+ row.cells.emplace_back(2, 0);
}
{
- bs.rows.push_back(CompressedRow());
+ bs.rows.emplace_back();
CompressedRow& row = bs.rows.back();
row.block.size = 2;
row.block.position = 2;
- row.cells.push_back(Cell(1, 0));
- row.cells.push_back(Cell(2, 0));
+ row.cells.emplace_back(1, 0);
+ row.cells.emplace_back(2, 0);
}
int row_block_size = 0;
@@ -241,26 +241,26 @@
CompressedRowBlockStructure bs;
- bs.cols.push_back(Block());
+ bs.cols.emplace_back();
bs.cols.back().size = 3;
bs.cols.back().position = 0;
- bs.cols.push_back(Block());
+ bs.cols.emplace_back();
bs.cols.back().size = 4;
bs.cols.back().position = 3;
- bs.cols.push_back(Block());
+ bs.cols.emplace_back();
bs.cols.back().size = 3;
bs.cols.back().position = 7;
{
- bs.rows.push_back(CompressedRow());
+ bs.rows.emplace_back();
CompressedRow& row = bs.rows.back();
row.block.size = 2;
row.block.position = 0;
- row.cells.push_back(Cell(0, 0));
- row.cells.push_back(Cell(1, 0));
- row.cells.push_back(Cell(2, 0));
+ row.cells.emplace_back(0, 0);
+ row.cells.emplace_back(1, 0);
+ row.cells.emplace_back(2, 0);
}
int row_block_size = 0;
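The push_back(Block()) and push_back(Cell(...)) rewrites above come from modernize-use-emplace: emplace_back forwards its arguments to the element constructor and builds the element in place instead of copying a temporary. A small sketch with a hypothetical Item struct standing in for Cell/Block:

#include <vector>

// Hypothetical stand-in for a small struct like ceres::internal::Cell.
struct Item {
  Item() = default;
  Item(int id, int pos) : id(id), pos(pos) {}
  int id = 0;
  int pos = 0;
};

int main() {
  std::vector<Item> items;
  items.push_back(Item(3, 7));  // constructs a temporary, then moves/copies it in
  items.emplace_back(3, 7);     // constructs the element directly in place
  items.emplace_back();         // default-constructs in place, like push_back(Item())
  return items.size() == 3 ? 0 : 1;
}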
diff --git a/internal/ceres/dogleg_strategy.h b/internal/ceres/dogleg_strategy.h
index 17f0cc7..1d219af 100644
--- a/internal/ceres/dogleg_strategy.h
+++ b/internal/ceres/dogleg_strategy.h
@@ -76,8 +76,8 @@
Matrix subspace_B() const { return subspace_B_; }
private:
- typedef Eigen::Matrix<double, 2, 1, Eigen::DontAlign> Vector2d;
- typedef Eigen::Matrix<double, 2, 2, Eigen::DontAlign> Matrix2d;
+ using Vector2d = Eigen::Matrix<double, 2, 1, Eigen::DontAlign>;
+ using Matrix2d = Eigen::Matrix<double, 2, 2, Eigen::DontAlign>;
LinearSolver::Summary ComputeGaussNewtonStep(
const PerSolveOptions& per_solve_options,
diff --git a/internal/ceres/dynamic_autodiff_cost_function_test.cc b/internal/ceres/dynamic_autodiff_cost_function_test.cc
index 7da81a9..7ee820a 100644
--- a/internal/ceres/dynamic_autodiff_cost_function_test.cc
+++ b/internal/ceres/dynamic_autodiff_cost_function_test.cc
@@ -1,5 +1,5 @@
// Ceres Solver - A fast non-linear least squares minimizer
-// Copyright 2015 Google Inc. All rights reserved.
+// Copyright 2022 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
@@ -149,8 +149,8 @@
EXPECT_EQ(4 * p - 8, jacobian_vect[0][20 * 10 + p]);
jacobian_vect[0][20 * 10 + p] = 0.0;
}
- for (int i = 0; i < jacobian_vect[0].size(); ++i) {
- EXPECT_EQ(0.0, jacobian_vect[0][i]);
+ for (double entry : jacobian_vect[0]) {
+ EXPECT_EQ(0.0, entry);
}
// Check "C" Jacobian for second parameter block.
@@ -158,8 +158,8 @@
EXPECT_EQ(1.0, jacobian_vect[1][20 * 5 + p]);
jacobian_vect[1][20 * 5 + p] = 0.0;
}
- for (int i = 0; i < jacobian_vect[1].size(); ++i) {
- EXPECT_EQ(0.0, jacobian_vect[1][i]);
+ for (double entry : jacobian_vect[1]) {
+ EXPECT_EQ(0.0, entry);
}
}
@@ -207,8 +207,8 @@
EXPECT_EQ(1.0, jacobian_vect[1][20 * 5 + p]);
jacobian_vect[1][20 * 5 + p] = 0.0;
}
- for (int i = 0; i < jacobian_vect[1].size(); ++i) {
- EXPECT_EQ(0.0, jacobian_vect[1][i]);
+ for (double& i : jacobian_vect[1]) {
+ EXPECT_EQ(0.0, i);
}
}
@@ -265,8 +265,8 @@
EXPECT_EQ(4 * p - 8, jacobian_vect[0][20 * 10 + p]);
jacobian_vect[0][20 * 10 + p] = 0.0;
}
- for (int i = 0; i < jacobian_vect[0].size(); ++i) {
- EXPECT_EQ(0.0, jacobian_vect[0][i]);
+ for (double& i : jacobian_vect[0]) {
+ EXPECT_EQ(0.0, i);
}
}
@@ -327,8 +327,8 @@
parameter_blocks_[2] = &z_[0];
// Prepare the cost function.
- typedef DynamicAutoDiffCostFunction<MyThreeParameterCostFunctor, 3>
- DynamicMyThreeParameterCostFunction;
+ using DynamicMyThreeParameterCostFunction =
+ DynamicAutoDiffCostFunction<MyThreeParameterCostFunctor, 3>;
auto cost_function = std::make_unique<DynamicMyThreeParameterCostFunction>(
new MyThreeParameterCostFunctor());
cost_function->AddParameterBlock(1);
@@ -559,8 +559,8 @@
parameter_blocks_[5] = &z2_;
// Prepare the cost function.
- typedef DynamicAutoDiffCostFunction<MySixParameterCostFunctor, 3>
- DynamicMySixParameterCostFunction;
+ using DynamicMySixParameterCostFunction =
+ DynamicAutoDiffCostFunction<MySixParameterCostFunctor, 3>;
auto cost_function = std::make_unique<DynamicMySixParameterCostFunction>(
new MySixParameterCostFunctor());
for (int i = 0; i < 6; ++i) {
diff --git a/internal/ceres/dynamic_compressed_row_finalizer.h b/internal/ceres/dynamic_compressed_row_finalizer.h
index 1645ece..fedee3b 100644
--- a/internal/ceres/dynamic_compressed_row_finalizer.h
+++ b/internal/ceres/dynamic_compressed_row_finalizer.h
@@ -40,7 +40,7 @@
struct CERES_NO_EXPORT DynamicCompressedRowJacobianFinalizer {
void operator()(SparseMatrix* base_jacobian, int num_parameters) {
- DynamicCompressedRowSparseMatrix* jacobian =
+ auto* jacobian =
down_cast<DynamicCompressedRowSparseMatrix*>(base_jacobian);
jacobian->Finalize(num_parameters);
}
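The auto* rewrite above relies on the cast on the right-hand side already spelling out the pointee type, so repeating it on the left only adds noise. down_cast is a Ceres-internal helper; the sketch below uses plain static_cast and hypothetical types to show the same idea:

#include <cassert>

struct Base { virtual ~Base() = default; };
struct Derived : Base { int finalized = 0; };

void Finalize(Base* base) {
  // The target type is already named in the cast, so auto* hides nothing.
  auto* derived = static_cast<Derived*>(base);
  derived->finalized = 1;
}

int main() {
  Derived d;
  Finalize(&d);
  assert(d.finalized == 1);
  return 0;
}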
diff --git a/internal/ceres/dynamic_compressed_row_jacobian_writer.cc b/internal/ceres/dynamic_compressed_row_jacobian_writer.cc
index 4ae9518..8c254e9 100644
--- a/internal/ceres/dynamic_compressed_row_jacobian_writer.cc
+++ b/internal/ceres/dynamic_compressed_row_jacobian_writer.cc
@@ -1,5 +1,5 @@
// Ceres Solver - A fast non-linear least squares minimizer
-// Copyright 2015 Google Inc. All rights reserved.
+// Copyright 2022 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
@@ -62,8 +62,7 @@
int residual_offset,
double** jacobians,
SparseMatrix* base_jacobian) {
- DynamicCompressedRowSparseMatrix* jacobian =
- down_cast<DynamicCompressedRowSparseMatrix*>(base_jacobian);
+ auto* jacobian = down_cast<DynamicCompressedRowSparseMatrix*>(base_jacobian);
// Get the `residual_block` of interest.
const ResidualBlock* residual_block =
@@ -79,11 +78,10 @@
jacobian->ClearRows(residual_offset, num_residuals);
// Iterate over each parameter block.
- for (int i = 0; i < evaluated_jacobian_blocks.size(); ++i) {
+ for (const auto& evaluated_jacobian_block : evaluated_jacobian_blocks) {
const ParameterBlock* parameter_block =
- program_->parameter_blocks()[evaluated_jacobian_blocks[i].first];
- const int parameter_block_jacobian_index =
- evaluated_jacobian_blocks[i].second;
+ program_->parameter_blocks()[evaluated_jacobian_block.first];
+ const int parameter_block_jacobian_index = evaluated_jacobian_block.second;
const int parameter_block_size = parameter_block->TangentSize();
const double* parameter_jacobian =
jacobians[parameter_block_jacobian_index];
diff --git a/internal/ceres/dynamic_compressed_row_sparse_matrix.cc b/internal/ceres/dynamic_compressed_row_sparse_matrix.cc
index 936e682..7185e14 100644
--- a/internal/ceres/dynamic_compressed_row_sparse_matrix.cc
+++ b/internal/ceres/dynamic_compressed_row_sparse_matrix.cc
@@ -1,5 +1,5 @@
// Ceres Solver - A fast non-linear least squares minimizer
-// Copyright 2015 Google Inc. All rights reserved.
+// Copyright 2022 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
@@ -70,8 +70,8 @@
// Count the number of non-zeros and resize `cols_` and `values_`.
int num_jacobian_nonzeros = 0;
- for (int i = 0; i < dynamic_cols_.size(); ++i) {
- num_jacobian_nonzeros += dynamic_cols_[i].size();
+ for (const auto& dynamic_col : dynamic_cols_) {
+ num_jacobian_nonzeros += dynamic_col.size();
}
SetMaxNumNonZeros(num_jacobian_nonzeros + num_additional_elements);
diff --git a/internal/ceres/dynamic_compressed_row_sparse_matrix_test.cc b/internal/ceres/dynamic_compressed_row_sparse_matrix_test.cc
index 3b67389..269858f 100644
--- a/internal/ceres/dynamic_compressed_row_sparse_matrix_test.cc
+++ b/internal/ceres/dynamic_compressed_row_sparse_matrix_test.cc
@@ -141,7 +141,7 @@
}
void ExpectEqualToCompressedRowSparseMatrixReference() {
- typedef Eigen::Map<const Eigen::VectorXi> ConstIntVectorRef;
+ using ConstIntVectorRef = Eigen::Map<const Eigen::VectorXi>;
ConstIntVectorRef crsm_rows(crsm->rows(), crsm->num_rows() + 1);
ConstIntVectorRef dcrsm_rows(dcrsm->rows(), dcrsm->num_rows() + 1);
diff --git a/internal/ceres/dynamic_numeric_diff_cost_function_test.cc b/internal/ceres/dynamic_numeric_diff_cost_function_test.cc
index ad3f479..e35e1bc 100644
--- a/internal/ceres/dynamic_numeric_diff_cost_function_test.cc
+++ b/internal/ceres/dynamic_numeric_diff_cost_function_test.cc
@@ -1,5 +1,5 @@
// Ceres Solver - A fast non-linear least squares minimizer
-// Copyright 2015 Google Inc. All rights reserved.
+// Copyright 2022 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
@@ -149,8 +149,8 @@
EXPECT_NEAR(4 * p - 8, jacobian_vect[0][20 * 10 + p], kTolerance);
jacobian_vect[0][20 * 10 + p] = 0.0;
}
- for (int i = 0; i < jacobian_vect[0].size(); ++i) {
- EXPECT_NEAR(0.0, jacobian_vect[0][i], kTolerance);
+ for (double entry : jacobian_vect[0]) {
+ EXPECT_NEAR(0.0, entry, kTolerance);
}
// Check "C" Jacobian for second parameter block.
@@ -158,8 +158,8 @@
EXPECT_NEAR(1.0, jacobian_vect[1][20 * 5 + p], kTolerance);
jacobian_vect[1][20 * 5 + p] = 0.0;
}
- for (int i = 0; i < jacobian_vect[1].size(); ++i) {
- EXPECT_NEAR(0.0, jacobian_vect[1][i], kTolerance);
+ for (double entry : jacobian_vect[1]) {
+ EXPECT_NEAR(0.0, entry, kTolerance);
}
}
@@ -208,8 +208,8 @@
EXPECT_NEAR(1.0, jacobian_vect[1][20 * 5 + p], kTolerance);
jacobian_vect[1][20 * 5 + p] = 0.0;
}
- for (int i = 0; i < jacobian_vect[1].size(); ++i) {
- EXPECT_EQ(0.0, jacobian_vect[1][i]);
+ for (double& i : jacobian_vect[1]) {
+ EXPECT_EQ(0.0, i);
}
}
@@ -266,8 +266,8 @@
EXPECT_NEAR(4 * p - 8, jacobian_vect[0][20 * 10 + p], kTolerance);
jacobian_vect[0][20 * 10 + p] = 0.0;
}
- for (int i = 0; i < jacobian_vect[0].size(); ++i) {
- EXPECT_EQ(0.0, jacobian_vect[0][i]);
+ for (double& i : jacobian_vect[0]) {
+ EXPECT_EQ(0.0, i);
}
}
@@ -328,8 +328,8 @@
parameter_blocks_[2] = &z_[0];
// Prepare the cost function.
- typedef DynamicNumericDiffCostFunction<MyThreeParameterCostFunctor>
- DynamicMyThreeParameterCostFunction;
+ using DynamicMyThreeParameterCostFunction =
+ DynamicNumericDiffCostFunction<MyThreeParameterCostFunctor>;
auto cost_function = std::make_unique<DynamicMyThreeParameterCostFunction>(
new MyThreeParameterCostFunctor());
cost_function->AddParameterBlock(1);
diff --git a/internal/ceres/dynamic_sparse_normal_cholesky_solver.cc b/internal/ceres/dynamic_sparse_normal_cholesky_solver.cc
index d101aef..078edbf 100644
--- a/internal/ceres/dynamic_sparse_normal_cholesky_solver.cc
+++ b/internal/ceres/dynamic_sparse_normal_cholesky_solver.cc
@@ -35,6 +35,7 @@
#include <ctime>
#include <memory>
#include <sstream>
+#include <utility>
#include "Eigen/SparseCore"
#include "ceres/compressed_row_sparse_matrix.h"
@@ -54,8 +55,8 @@
namespace internal {
DynamicSparseNormalCholeskySolver::DynamicSparseNormalCholeskySolver(
- const LinearSolver::Options& options)
- : options_(options) {}
+ LinearSolver::Options options)
+ : options_(std::move(options)) {}
LinearSolver::Summary DynamicSparseNormalCholeskySolver::SolveImpl(
CompressedRowSparseMatrix* A,
diff --git a/internal/ceres/dynamic_sparse_normal_cholesky_solver.h b/internal/ceres/dynamic_sparse_normal_cholesky_solver.h
index 9ab2124..6f73c96 100644
--- a/internal/ceres/dynamic_sparse_normal_cholesky_solver.h
+++ b/internal/ceres/dynamic_sparse_normal_cholesky_solver.h
@@ -57,8 +57,7 @@
class CERES_NO_EXPORT DynamicSparseNormalCholeskySolver
: public CompressedRowSparseMatrixSolver {
public:
- explicit DynamicSparseNormalCholeskySolver(
- const LinearSolver::Options& options);
+ explicit DynamicSparseNormalCholeskySolver(LinearSolver::Options options);
private:
LinearSolver::Summary SolveImpl(CompressedRowSparseMatrix* A,
diff --git a/internal/ceres/dynamic_sparsity_test.cc b/internal/ceres/dynamic_sparsity_test.cc
index 6c7b24c..8c98528 100644
--- a/internal/ceres/dynamic_sparsity_test.cc
+++ b/internal/ceres/dynamic_sparsity_test.cc
@@ -32,6 +32,7 @@
// Based on examples/ellipse_approximation.cc
#include <cmath>
+#include <utility>
#include <vector>
#include "ceres/ceres.h"
@@ -280,8 +281,8 @@
EIGEN_MAKE_ALIGNED_OPERATOR_NEW
PointToLineSegmentContourCostFunction(const int num_segments,
- const Eigen::Vector2d& y)
- : num_segments_(num_segments), y_(y) {
+ Eigen::Vector2d y)
+ : num_segments_(num_segments), y_(std::move(y)) {
// The first parameter is the preimage position.
mutable_parameter_block_sizes()->push_back(1);
// The next parameters are the control points for the line segment contour.
diff --git a/internal/ceres/eigensparse.cc b/internal/ceres/eigensparse.cc
index 38055dc..81668c8 100644
--- a/internal/ceres/eigensparse.cc
+++ b/internal/ceres/eigensparse.cc
@@ -49,7 +49,7 @@
template <typename Solver>
class EigenSparseCholeskyTemplate final : public SparseCholesky {
public:
- EigenSparseCholeskyTemplate() : analyzed_(false) {}
+ EigenSparseCholeskyTemplate() = default;
CompressedRowSparseMatrix::StorageType StorageType() const final {
return CompressedRowSparseMatrix::LOWER_TRIANGULAR;
}
@@ -136,20 +136,19 @@
private:
Eigen::Matrix<typename Solver::Scalar, Eigen::Dynamic, 1> values_,
scalar_rhs_, scalar_solution_;
- bool analyzed_;
+ bool analyzed_{false};
Solver solver_;
};
std::unique_ptr<SparseCholesky> EigenSparseCholesky::Create(
const OrderingType ordering_type) {
- typedef Eigen::SimplicialLDLT<Eigen::SparseMatrix<double>,
- Eigen::Upper,
- Eigen::AMDOrdering<int>>
- WithAMDOrdering;
- typedef Eigen::SimplicialLDLT<Eigen::SparseMatrix<double>,
- Eigen::Upper,
- Eigen::NaturalOrdering<int>>
- WithNaturalOrdering;
+ using WithAMDOrdering = Eigen::SimplicialLDLT<Eigen::SparseMatrix<double>,
+ Eigen::Upper,
+ Eigen::AMDOrdering<int>>;
+ using WithNaturalOrdering =
+ Eigen::SimplicialLDLT<Eigen::SparseMatrix<double>,
+ Eigen::Upper,
+ Eigen::NaturalOrdering<int>>;
if (ordering_type == AMD) {
return std::make_unique<EigenSparseCholeskyTemplate<WithAMDOrdering>>();
@@ -162,14 +161,13 @@
std::unique_ptr<SparseCholesky> FloatEigenSparseCholesky::Create(
const OrderingType ordering_type) {
- typedef Eigen::SimplicialLDLT<Eigen::SparseMatrix<float>,
- Eigen::Upper,
- Eigen::AMDOrdering<int>>
- WithAMDOrdering;
- typedef Eigen::SimplicialLDLT<Eigen::SparseMatrix<float>,
- Eigen::Upper,
- Eigen::NaturalOrdering<int>>
- WithNaturalOrdering;
+ using WithAMDOrdering = Eigen::SimplicialLDLT<Eigen::SparseMatrix<float>,
+ Eigen::Upper,
+ Eigen::AMDOrdering<int>>;
+ using WithNaturalOrdering =
+ Eigen::SimplicialLDLT<Eigen::SparseMatrix<float>,
+ Eigen::Upper,
+ Eigen::NaturalOrdering<int>>;
if (ordering_type == AMD) {
return std::make_unique<EigenSparseCholeskyTemplate<WithAMDOrdering>>();
} else {
diff --git a/internal/ceres/evaluation_callback_test.cc b/internal/ceres/evaluation_callback_test.cc
index e9809a3..179c106 100644
--- a/internal/ceres/evaluation_callback_test.cc
+++ b/internal/ceres/evaluation_callback_test.cc
@@ -50,7 +50,7 @@
template <typename T>
uint64_t Djb2Hash(const T* data, const int size) {
uint64_t hash = 5381;
- const uint8_t* data_as_bytes = reinterpret_cast<const uint8_t*>(data);
+ const auto* data_as_bytes = reinterpret_cast<const uint8_t*>(data);
for (int i = 0; i < sizeof(*data) * size; ++i) {
hash = hash * 33 + data_as_bytes[i];
}
diff --git a/internal/ceres/evaluator.h b/internal/ceres/evaluator.h
index 28e8ce2..68a4fb2 100644
--- a/internal/ceres/evaluator.h
+++ b/internal/ceres/evaluator.h
@@ -160,7 +160,7 @@
// life time issues. Further, these calls are not expected to be
// frequent or performance sensitive.
virtual std::map<std::string, CallStatistics> Statistics() const {
- return std::map<std::string, CallStatistics>();
+ return {};
}
};
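The `return {};` change above (modernize-return-braced-init-list) value-initializes the function's declared return type without repeating its name; the same rewrite appears later for std::unique_ptr returns. A sketch with hypothetical names:

#include <map>
#include <memory>
#include <string>

struct Stats { double time = 0.0; int calls = 0; };

// Instead of `return std::map<std::string, Stats>();`
std::map<std::string, Stats> DefaultStatistics() { return {}; }

// Instead of `return std::unique_ptr<int>();`
std::unique_ptr<int> NoValue() { return {}; }

int main() {
  return (DefaultStatistics().empty() && NoValue() == nullptr) ? 0 : 1;
}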
diff --git a/internal/ceres/evaluator_test.cc b/internal/ceres/evaluator_test.cc
index f5703f7..10c0d1d 100644
--- a/internal/ceres/evaluator_test.cc
+++ b/internal/ceres/evaluator_test.cc
@@ -60,7 +60,7 @@
template <int kFactor, int kNumResiduals, int... Ns>
class ParameterIgnoringCostFunction
: public SizedCostFunction<kNumResiduals, Ns...> {
- typedef SizedCostFunction<kNumResiduals, Ns...> Base;
+ using Base = SizedCostFunction<kNumResiduals, Ns...>;
public:
explicit ParameterIgnoringCostFunction(bool succeeds = true)
diff --git a/internal/ceres/execution_summary.h b/internal/ceres/execution_summary.h
index aac7ad6..fbee75f 100644
--- a/internal/ceres/execution_summary.h
+++ b/internal/ceres/execution_summary.h
@@ -34,6 +34,7 @@
#include <map>
#include <mutex>
#include <string>
+#include <utility>
#include "ceres/internal/export.h"
#include "ceres/wall_time.h"
@@ -42,9 +43,9 @@
namespace internal {
struct CallStatistics {
- CallStatistics() : time(0.), calls(0) {}
- double time;
- int calls;
+ CallStatistics() = default;
+ double time{0.};
+ int calls{0};
};
// Struct used by various objects to report statistics about their
@@ -69,8 +70,10 @@
class ScopedExecutionTimer {
public:
- ScopedExecutionTimer(const std::string& name, ExecutionSummary* summary)
- : start_time_(WallTimeInSeconds()), name_(name), summary_(summary) {}
+ ScopedExecutionTimer(std::string name, ExecutionSummary* summary)
+ : start_time_(WallTimeInSeconds()),
+ name_(std::move(name)),
+ summary_(summary) {}
~ScopedExecutionTimer() {
summary_->IncrementTimeBy(name_, WallTimeInSeconds() - start_time_);
diff --git a/internal/ceres/fixed_array_test.cc b/internal/ceres/fixed_array_test.cc
index 2982a98..d6c5605 100644
--- a/internal/ceres/fixed_array_test.cc
+++ b/internal/ceres/fixed_array_test.cc
@@ -14,8 +14,7 @@
#include "ceres/internal/fixed_array.h"
-#include <stdio.h>
-
+#include <cstdio>
#include <cstring>
#include <list>
#include <memory>
@@ -54,7 +53,7 @@
class ConstructionTester {
public:
- ConstructionTester() : self_ptr_(this), value_(0) { constructions++; }
+ ConstructionTester() : self_ptr_(this) { constructions++; }
~ConstructionTester() {
assert(self_ptr_ == this);
self_ptr_ = nullptr;
@@ -75,7 +74,7 @@
// self_ptr_ should always point to 'this' -- that's how we can be sure the
// constructor has been called.
ConstructionTester* self_ptr_;
- int value_;
+ int value_{0};
};
int ConstructionTester::constructions = 0;
@@ -117,7 +116,7 @@
TEST(FixedArrayTest, MoveCtor) {
ceres::internal::FixedArray<std::unique_ptr<int>, 10> on_stack(5);
for (int i = 0; i < 5; ++i) {
- on_stack[i] = std::unique_ptr<int>(new int(i));
+ on_stack[i] = std::make_unique<int>(i);
}
ceres::internal::FixedArray<std::unique_ptr<int>, 10> stack_copy =
@@ -127,7 +126,7 @@
ceres::internal::FixedArray<std::unique_ptr<int>, 10> allocated(15);
for (int i = 0; i < 15; ++i) {
- allocated[i] = std::unique_ptr<int>(new int(i));
+ allocated[i] = std::make_unique<int>(i);
}
ceres::internal::FixedArray<std::unique_ptr<int>, 10> alloced_copy =
@@ -655,9 +654,8 @@
using Alloc = std::allocator<T>;
using size_type = typename Alloc::size_type;
- CountingAllocator() : bytes_used_(nullptr), instance_count_(nullptr) {}
- explicit CountingAllocator(int64_t* b)
- : bytes_used_(b), instance_count_(nullptr) {}
+ CountingAllocator() = default;
+ explicit CountingAllocator(int64_t* b) : bytes_used_(b) {}
CountingAllocator(int64_t* b, int64_t* a)
: bytes_used_(b), instance_count_(a) {}
@@ -679,8 +677,8 @@
*bytes_used_ -= n * sizeof(T);
}
- int64_t* bytes_used_;
- int64_t* instance_count_;
+ int64_t* bytes_used_{nullptr};
+ int64_t* instance_count_{nullptr};
};
TEST(AllocatorSupportTest, CountInlineAllocations) {
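The std::make_unique changes above (modernize-make-unique) avoid spelling the element type twice and keep allocation and ownership transfer in a single expression. A minimal sketch:

#include <memory>
#include <vector>

int main() {
  std::vector<std::unique_ptr<int>> values;
  for (int i = 0; i < 5; ++i) {
    // Equivalent to std::unique_ptr<int>(new int(i)), but shorter and
    // without a raw new expression in user code.
    values.push_back(std::make_unique<int>(i));
  }
  return (*values[4] == 4) ? 0 : 1;
}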
diff --git a/internal/ceres/float_cxsparse.cc b/internal/ceres/float_cxsparse.cc
index a5cc527..a6d5e81 100644
--- a/internal/ceres/float_cxsparse.cc
+++ b/internal/ceres/float_cxsparse.cc
@@ -40,7 +40,7 @@
std::unique_ptr<SparseCholesky> FloatCXSparseCholesky::Create(
OrderingType ordering_type) {
LOG(FATAL) << "FloatCXSparseCholesky is not available.";
- return std::unique_ptr<SparseCholesky>();
+ return {};
}
} // namespace internal
diff --git a/internal/ceres/float_suitesparse.cc b/internal/ceres/float_suitesparse.cc
index 731a1e9..dc1d0e4 100644
--- a/internal/ceres/float_suitesparse.cc
+++ b/internal/ceres/float_suitesparse.cc
@@ -40,7 +40,7 @@
std::unique_ptr<SparseCholesky> FloatSuiteSparseCholesky::Create(
OrderingType ordering_type) {
LOG(FATAL) << "FloatSuiteSparseCholesky is not available.";
- return std::unique_ptr<SparseCholesky>();
+ return {};
}
} // namespace internal
diff --git a/internal/ceres/gradient_checker.cc b/internal/ceres/gradient_checker.cc
index fd302f0..777001e 100644
--- a/internal/ceres/gradient_checker.cc
+++ b/internal/ceres/gradient_checker.cc
@@ -150,7 +150,7 @@
GradientChecker::GradientChecker(const CostFunction* function,
const vector<const Manifold*>* manifolds,
const NumericDiffOptions& options)
- : delete_manifolds_(false), function_(function) {
+ : function_(function) {
CHECK(function != nullptr);
if (manifolds != nullptr) {
manifolds_ = *manifolds;
diff --git a/internal/ceres/gradient_checker_test.cc b/internal/ceres/gradient_checker_test.cc
index 1d59e2f..bef7668 100644
--- a/internal/ceres/gradient_checker_test.cc
+++ b/internal/ceres/gradient_checker_test.cc
@@ -34,6 +34,7 @@
#include <cmath>
#include <cstdlib>
+#include <utility>
#include <vector>
#include "ceres/cost_function.h"
@@ -289,8 +290,8 @@
*/
class LinearCostFunction : public CostFunction {
public:
- explicit LinearCostFunction(const Vector& residuals_offset)
- : residuals_offset_(residuals_offset) {
+ explicit LinearCostFunction(Vector residuals_offset)
+ : residuals_offset_(std::move(residuals_offset)) {
set_num_residuals(residuals_offset_.size());
}
diff --git a/internal/ceres/gradient_checking_cost_function.cc b/internal/ceres/gradient_checking_cost_function.cc
index 3251806..1c3b318 100644
--- a/internal/ceres/gradient_checking_cost_function.cc
+++ b/internal/ceres/gradient_checking_cost_function.cc
@@ -1,5 +1,5 @@
// Ceres Solver - A fast non-linear least squares minimizer
-// Copyright 2015 Google Inc. All rights reserved.
+// Copyright 2022 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
@@ -37,6 +37,7 @@
#include <memory>
#include <numeric>
#include <string>
+#include <utility>
#include <vector>
#include "ceres/dynamic_numeric_diff_cost_function.h"
@@ -67,12 +68,12 @@
const std::vector<const Manifold*>* manifolds,
const NumericDiffOptions& options,
double relative_precision,
- const string& extra_info,
+ string extra_info,
GradientCheckingIterationCallback* callback)
: function_(function),
gradient_checker_(function, manifolds, options),
relative_precision_(relative_precision),
- extra_info_(extra_info),
+ extra_info_(std::move(extra_info)),
callback_(callback) {
CHECK(callback_ != nullptr);
const vector<int32_t>& parameter_block_sizes =
@@ -198,8 +199,7 @@
// For every ParameterBlock in problem_impl, create a new parameter block with
// the same manifold and constancy.
const vector<ParameterBlock*>& parameter_blocks = program->parameter_blocks();
- for (int i = 0; i < parameter_blocks.size(); ++i) {
- ParameterBlock* parameter_block = parameter_blocks[i];
+ for (auto* parameter_block : parameter_blocks) {
gradient_checking_problem_impl->AddParameterBlock(
parameter_block->mutable_user_state(),
parameter_block->Size(),
diff --git a/internal/ceres/gradient_problem_solver_test.cc b/internal/ceres/gradient_problem_solver_test.cc
index 47a69f3..407fb17 100644
--- a/internal/ceres/gradient_problem_solver_test.cc
+++ b/internal/ceres/gradient_problem_solver_test.cc
@@ -1,5 +1,5 @@
// Ceres Solver - A fast non-linear least squares minimizer
-// Copyright 2015 Google Inc. All rights reserved.
+// Copyright 2022 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
@@ -113,8 +113,8 @@
ceres::Solve(options, problem, &x, &summary);
num_iterations = summary.iterations.size() - 1;
EXPECT_GT(num_iterations, 1);
- for (int i = 0; i < callback.x_values.size(); ++i) {
- EXPECT_EQ(50.0, callback.x_values[i]);
+ for (double value : callback.x_values) {
+ EXPECT_EQ(50.0, value);
}
// Second try: with updating
diff --git a/internal/ceres/implicit_schur_complement.cc b/internal/ceres/implicit_schur_complement.cc
index 82a776c..677d767 100644
--- a/internal/ceres/implicit_schur_complement.cc
+++ b/internal/ceres/implicit_schur_complement.cc
@@ -136,10 +136,10 @@
const double* D, BlockSparseMatrix* block_diagonal) {
const CompressedRowBlockStructure* block_diagonal_structure =
block_diagonal->block_structure();
- for (int r = 0; r < block_diagonal_structure->rows.size(); ++r) {
- const int row_block_pos = block_diagonal_structure->rows[r].block.position;
- const int row_block_size = block_diagonal_structure->rows[r].block.size;
- const Cell& cell = block_diagonal_structure->rows[r].cells[0];
+ for (const auto& row : block_diagonal_structure->rows) {
+ const int row_block_pos = row.block.position;
+ const int row_block_size = row.block.size;
+ const Cell& cell = row.cells[0];
MatrixRef m(block_diagonal->mutable_values() + cell.position,
row_block_size,
row_block_size);
diff --git a/internal/ceres/inner_product_computer.cc b/internal/ceres/inner_product_computer.cc
index c24f6fc..fbc43bf 100644
--- a/internal/ceres/inner_product_computer.cc
+++ b/internal/ceres/inner_product_computer.cc
@@ -167,8 +167,8 @@
for (int c2 = c2_begin; c2 < c2_end; ++c2) {
const Cell& cell2 = row.cells[c2];
- product_terms.push_back(InnerProductComputer::ProductTerm(
- cell1.block_id, cell2.block_id, product_terms.size()));
+ product_terms.emplace_back(
+ cell1.block_id, cell2.block_id, product_terms.size());
}
}
}
diff --git a/internal/ceres/inner_product_computer_test.cc b/internal/ceres/inner_product_computer_test.cc
index b672f8c..b01ad8e 100644
--- a/internal/ceres/inner_product_computer_test.cc
+++ b/internal/ceres/inner_product_computer_test.cc
@@ -117,8 +117,7 @@
random_matrix->ToTripletSparseMatrix(&tsm);
std::vector<Eigen::Triplet<double>> triplets;
for (int i = 0; i < tsm.num_nonzeros(); ++i) {
- triplets.push_back(Eigen::Triplet<double>(
- tsm.rows()[i], tsm.cols()[i], tsm.values()[i]));
+ triplets.emplace_back(tsm.rows()[i], tsm.cols()[i], tsm.values()[i]);
}
Eigen::SparseMatrix<double> eigen_random_matrix(
random_matrix->num_rows(), random_matrix->num_cols());
@@ -189,8 +188,8 @@
std::vector<Eigen::Triplet<double>> triplets;
for (int i = 0; i < tsm.num_nonzeros(); ++i) {
if (tsm.rows()[i] >= start_row && tsm.rows()[i] < end_row) {
- triplets.push_back(Eigen::Triplet<double>(
- tsm.rows()[i], tsm.cols()[i], tsm.values()[i]));
+ triplets.emplace_back(
+ tsm.rows()[i], tsm.cols()[i], tsm.values()[i]);
}
}
diff --git a/internal/ceres/iterative_refiner.h b/internal/ceres/iterative_refiner.h
index 87e45b1..837af17 100644
--- a/internal/ceres/iterative_refiner.h
+++ b/internal/ceres/iterative_refiner.h
@@ -62,7 +62,7 @@
public:
// max_num_iterations is the number of refinement iterations to
// perform.
- IterativeRefiner(int max_num_iterations);
+ explicit IterativeRefiner(int max_num_iterations);
// Needed for mocking.
virtual ~IterativeRefiner();
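Marking the single-argument constructor explicit above (google-explicit-constructor) stops a bare int from silently converting to the class type. A sketch with a hypothetical Refiner class:

class Refiner {
 public:
  explicit Refiner(int max_num_iterations)
      : max_num_iterations_(max_num_iterations) {}
  int max_num_iterations() const { return max_num_iterations_; }

 private:
  int max_num_iterations_;
};

void Run(const Refiner& refiner) { (void)refiner; }

int main() {
  Run(Refiner(3));  // fine: the conversion is spelled out
  // Run(3);        // would no longer compile: no implicit int -> Refiner
  return 0;
}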
diff --git a/internal/ceres/iterative_refiner_test.cc b/internal/ceres/iterative_refiner_test.cc
index 5464a27..e2167b0 100644
--- a/internal/ceres/iterative_refiner_test.cc
+++ b/internal/ceres/iterative_refiner_test.cc
@@ -30,6 +30,8 @@
#include "ceres/iterative_refiner.h"
+#include <utility>
+
#include "Eigen/Dense"
#include "ceres/internal/eigen.h"
#include "ceres/sparse_cholesky.h"
@@ -53,7 +55,7 @@
// A fake SparseMatrix, which uses an Eigen matrix to do the real work.
class FakeSparseMatrix : public SparseMatrix {
public:
- FakeSparseMatrix(const Matrix& m) : m_(m) {}
+ explicit FakeSparseMatrix(Matrix m) : m_(std::move(m)) {}
// y += Ax
void RightMultiply(const double* x, double* y) const final {
@@ -88,7 +90,7 @@
template <typename Scalar>
class FakeSparseCholesky : public SparseCholesky {
public:
- FakeSparseCholesky(const Matrix& lhs) { lhs_ = lhs.cast<Scalar>(); }
+ explicit FakeSparseCholesky(const Matrix& lhs) { lhs_ = lhs.cast<Scalar>(); }
LinearSolverTerminationType Solve(const double* rhs_ptr,
double* solution_ptr,
diff --git a/internal/ceres/iterative_schur_complement_solver.cc b/internal/ceres/iterative_schur_complement_solver.cc
index 6ff84b5..bc22d68 100644
--- a/internal/ceres/iterative_schur_complement_solver.cc
+++ b/internal/ceres/iterative_schur_complement_solver.cc
@@ -32,6 +32,7 @@
#include <algorithm>
#include <cstring>
+#include <utility>
#include <vector>
#include "Eigen/Dense"
@@ -54,8 +55,8 @@
namespace internal {
IterativeSchurComplementSolver::IterativeSchurComplementSolver(
- const LinearSolver::Options& options)
- : options_(options) {}
+ LinearSolver::Options options)
+ : options_(std::move(options)) {}
IterativeSchurComplementSolver::~IterativeSchurComplementSolver() = default;
diff --git a/internal/ceres/iterative_schur_complement_solver.h b/internal/ceres/iterative_schur_complement_solver.h
index 0794f36..50f4694 100644
--- a/internal/ceres/iterative_schur_complement_solver.h
+++ b/internal/ceres/iterative_schur_complement_solver.h
@@ -73,7 +73,7 @@
class CERES_NO_EXPORT IterativeSchurComplementSolver final
: public BlockSparseMatrixSolver {
public:
- explicit IterativeSchurComplementSolver(const LinearSolver::Options& options);
+ explicit IterativeSchurComplementSolver(LinearSolver::Options options);
IterativeSchurComplementSolver(const IterativeSchurComplementSolver&) =
delete;
void operator=(const IterativeSchurComplementSolver&) = delete;
diff --git a/internal/ceres/jet_operator_benchmark.cc b/internal/ceres/jet_operator_benchmark.cc
index 5701556..8d8900c 100644
--- a/internal/ceres/jet_operator_benchmark.cc
+++ b/internal/ceres/jet_operator_benchmark.cc
@@ -42,7 +42,7 @@
static constexpr std::size_t SIZE = 20;
public:
- JetInputData() : index_{0}, a_{}, b_{}, c_{}, d_{}, e_{} {
+ JetInputData() {
for (int i = 0; i < static_cast<int>(SIZE); i++) {
const T ti = static_cast<T>(i + 1);
@@ -83,12 +83,12 @@
T scalar_e() const { return scalar_e_[index_]; }
private:
- std::size_t index_;
- std::array<JetType, SIZE> a_;
- std::array<JetType, SIZE> b_;
- std::array<JetType, SIZE> c_;
- std::array<JetType, SIZE> d_;
- std::array<JetType, SIZE> e_;
+ std::size_t index_{0};
+ std::array<JetType, SIZE> a_{};
+ std::array<JetType, SIZE> b_{};
+ std::array<JetType, SIZE> c_{};
+ std::array<JetType, SIZE> d_{};
+ std::array<JetType, SIZE> e_{};
std::array<T, SIZE> scalar_a_;
std::array<T, SIZE> scalar_b_;
std::array<T, SIZE> scalar_c_;
diff --git a/internal/ceres/jet_test.cc b/internal/ceres/jet_test.cc
index 544575c..f06c599 100644
--- a/internal/ceres/jet_test.cc
+++ b/internal/ceres/jet_test.cc
@@ -547,7 +547,7 @@
TEST(Jet, Hypot2) {
// Resolve the ambiguity between two and three argument hypot overloads
using Hypot2 = J(const J&, const J&);
- Hypot2* const hypot2 = static_cast<Hypot2*>(&hypot<double, 2>);
+ auto* const hypot2 = static_cast<Hypot2*>(&hypot<double, 2>);
// clang-format off
NumericalTest2("hypot2", hypot2, 0.0, 1e-5);
@@ -1172,8 +1172,8 @@
}
TEST(Jet, Nested3X) {
- typedef Jet<J, 2> JJ;
- typedef Jet<JJ, 2> JJJ;
+ using JJ = Jet<J, 2>;
+ using JJJ = Jet<JJ, 2>;
JJJ x;
x.a = JJ(J(1, 0), 0);
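The jet_test.cc hunk above combines a function-type alias with auto* to pick one overload of an overloaded function, and replaces nested typedefs with nested using aliases. A reduced sketch with hypothetical overloads:

double Norm(double x) { return x < 0 ? -x : x; }
double Norm(double x, double y) { return Norm(x) + Norm(y); }

int main() {
  // A function type names the overload we want...
  using Norm2 = double(double, double);
  // ...and auto* deduces the pointer type from the cast.
  auto* const norm2 = static_cast<Norm2*>(&Norm);
  return norm2(-1.0, 2.0) == 3.0 ? 0 : 1;
}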
diff --git a/internal/ceres/levenberg_marquardt_strategy_test.cc b/internal/ceres/levenberg_marquardt_strategy_test.cc
index 818b7f5..0e7ec8d 100644
--- a/internal/ceres/levenberg_marquardt_strategy_test.cc
+++ b/internal/ceres/levenberg_marquardt_strategy_test.cc
@@ -1,5 +1,5 @@
// Ceres Solver - A fast non-linear least squares minimizer
-// Copyright 2015 Google Inc. All rights reserved.
+// Copyright 2022 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
@@ -70,7 +70,7 @@
EXPECT_NEAR(per_solve_options.D[i], diagonal_[i], kTolerance)
<< i << " " << per_solve_options.D[i] << " " << diagonal_[i];
}
- return LinearSolver::Summary();
+ return {};
}
const int num_cols_;
@@ -131,8 +131,8 @@
diagonal[0] = options.min_lm_diagonal;
diagonal[1] = 2.0;
diagonal[2] = options.max_lm_diagonal;
- for (int i = 0; i < 3; ++i) {
- diagonal[i] = sqrt(diagonal[i] / options.initial_radius);
+ for (double& diagonal_entry : diagonal) {
+ diagonal_entry = sqrt(diagonal_entry / options.initial_radius);
}
RegularizationCheckingLinearSolver linear_solver(3, diagonal);
diff --git a/internal/ceres/line_search.cc b/internal/ceres/line_search.cc
index d64858f..7e7d97f 100644
--- a/internal/ceres/line_search.cc
+++ b/internal/ceres/line_search.cc
@@ -249,12 +249,12 @@
if (interpolation_type == QUADRATIC) {
// Two point interpolation using function values and the
// gradient at the lower bound.
- samples.push_back(FunctionSample(current.x, current.value));
+ samples.emplace_back(current.x, current.value);
if (previous.value_is_valid) {
// Three point interpolation, using function values and the
// gradient at the lower bound.
- samples.push_back(FunctionSample(previous.x, previous.value));
+ samples.emplace_back(previous.x, previous.value);
}
} else if (interpolation_type == CUBIC) {
// Two point interpolation using the function values and the gradients.
diff --git a/internal/ceres/line_search_direction.h b/internal/ceres/line_search_direction.h
index be7497e..47b256d 100644
--- a/internal/ceres/line_search_direction.h
+++ b/internal/ceres/line_search_direction.h
@@ -1,5 +1,5 @@
// Ceres Solver - A fast non-linear least squares minimizer
-// Copyright 2015 Google Inc. All rights reserved.
+// Copyright 2022 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
@@ -44,20 +44,13 @@
class CERES_NO_EXPORT LineSearchDirection {
public:
struct Options {
- Options()
- : num_parameters(0),
- type(LBFGS),
- nonlinear_conjugate_gradient_type(FLETCHER_REEVES),
- function_tolerance(1e-12),
- max_lbfgs_rank(20),
- use_approximate_eigenvalue_bfgs_scaling(true) {}
-
- int num_parameters;
- LineSearchDirectionType type;
- NonlinearConjugateGradientType nonlinear_conjugate_gradient_type;
- double function_tolerance;
- int max_lbfgs_rank;
- bool use_approximate_eigenvalue_bfgs_scaling;
+ int num_parameters{0};
+ LineSearchDirectionType type{LBFGS};
+ NonlinearConjugateGradientType nonlinear_conjugate_gradient_type{
+ FLETCHER_REEVES};
+ double function_tolerance{1e-12};
+ int max_lbfgs_rank{20};
+ bool use_approximate_eigenvalue_bfgs_scaling{true};
};
static std::unique_ptr<LineSearchDirection> Create(const Options& options);
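Replacing the hand-written Options constructor with in-class default member initializers, as in the hunk above, keeps each default next to its declaration; the constructor can then be dropped or declared `= default`. A reduced sketch with hypothetical member names:

struct SketchOptions {
  // Defaults live next to the members; a constructor listing every member in
  // its initializer list is no longer needed.
  int num_parameters{0};
  double function_tolerance{1e-12};
  int max_rank{20};
  bool use_scaling{true};
};

int main() {
  SketchOptions options;    // all defaults applied
  options.max_rank = 10;    // override only what differs
  return options.use_scaling ? 0 : 1;
}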
diff --git a/internal/ceres/linear_least_squares_problems.cc b/internal/ceres/linear_least_squares_problems.cc
index 4b37e00..2d415af 100644
--- a/internal/ceres/linear_least_squares_problems.cc
+++ b/internal/ceres/linear_least_squares_problems.cc
@@ -299,12 +299,12 @@
problem->D = std::make_unique<double[]>(num_cols);
problem->num_eliminate_blocks = 2;
- CompressedRowBlockStructure* bs = new CompressedRowBlockStructure;
+ auto* bs = new CompressedRowBlockStructure;
std::unique_ptr<double[]> values =
std::make_unique<double[]>(num_rows * num_cols);
for (int c = 0; c < num_cols; ++c) {
- bs->cols.push_back(Block());
+ bs->cols.emplace_back();
bs->cols.back().size = 1;
bs->cols.back().position = c;
}
@@ -316,12 +316,12 @@
values[nnz++] = 1;
values[nnz++] = 2;
- bs->rows.push_back(CompressedRow());
+ bs->rows.emplace_back();
CompressedRow& row = bs->rows.back();
row.block.size = 1;
row.block.position = 0;
- row.cells.push_back(Cell(0, 0));
- row.cells.push_back(Cell(2, 1));
+ row.cells.emplace_back(0, 0);
+ row.cells.emplace_back(2, 1);
}
// Row 2
@@ -329,12 +329,12 @@
values[nnz++] = 3;
values[nnz++] = 4;
- bs->rows.push_back(CompressedRow());
+ bs->rows.emplace_back();
CompressedRow& row = bs->rows.back();
row.block.size = 1;
row.block.position = 1;
- row.cells.push_back(Cell(0, 2));
- row.cells.push_back(Cell(3, 3));
+ row.cells.emplace_back(0, 2);
+ row.cells.emplace_back(3, 3);
}
// Row 3
@@ -342,12 +342,12 @@
values[nnz++] = 5;
values[nnz++] = 6;
- bs->rows.push_back(CompressedRow());
+ bs->rows.emplace_back();
CompressedRow& row = bs->rows.back();
row.block.size = 1;
row.block.position = 2;
- row.cells.push_back(Cell(1, 4));
- row.cells.push_back(Cell(4, 5));
+ row.cells.emplace_back(1, 4);
+ row.cells.emplace_back(4, 5);
}
// Row 4
@@ -355,12 +355,12 @@
values[nnz++] = 7;
values[nnz++] = 8;
- bs->rows.push_back(CompressedRow());
+ bs->rows.emplace_back();
CompressedRow& row = bs->rows.back();
row.block.size = 1;
row.block.position = 3;
- row.cells.push_back(Cell(1, 6));
- row.cells.push_back(Cell(2, 7));
+ row.cells.emplace_back(1, 6);
+ row.cells.emplace_back(2, 7);
}
// Row 5
@@ -368,12 +368,12 @@
values[nnz++] = 9;
values[nnz++] = 1;
- bs->rows.push_back(CompressedRow());
+ bs->rows.emplace_back();
CompressedRow& row = bs->rows.back();
row.block.size = 1;
row.block.position = 4;
- row.cells.push_back(Cell(1, 8));
- row.cells.push_back(Cell(2, 9));
+ row.cells.emplace_back(1, 8);
+ row.cells.emplace_back(2, 9);
}
// Row 6
@@ -382,13 +382,13 @@
values[nnz++] = 1;
values[nnz++] = 1;
- bs->rows.push_back(CompressedRow());
+ bs->rows.emplace_back();
CompressedRow& row = bs->rows.back();
row.block.size = 1;
row.block.position = 5;
- row.cells.push_back(Cell(2, 10));
- row.cells.push_back(Cell(3, 11));
- row.cells.push_back(Cell(4, 12));
+ row.cells.emplace_back(2, 10);
+ row.cells.emplace_back(3, 11);
+ row.cells.emplace_back(4, 12);
}
auto A = std::make_unique<BlockSparseMatrix>(bs);
@@ -434,12 +434,12 @@
problem->D = std::make_unique<double[]>(num_cols);
problem->num_eliminate_blocks = 2;
- CompressedRowBlockStructure* bs = new CompressedRowBlockStructure;
+ auto* bs = new CompressedRowBlockStructure;
std::unique_ptr<double[]> values =
std::make_unique<double[]>(num_rows * num_cols);
for (int c = 0; c < num_cols; ++c) {
- bs->cols.push_back(Block());
+ bs->cols.emplace_back();
bs->cols.back().size = 1;
bs->cols.back().position = c;
}
@@ -449,51 +449,51 @@
// Row 1
{
values[nnz++] = 1;
- bs->rows.push_back(CompressedRow());
+ bs->rows.emplace_back();
CompressedRow& row = bs->rows.back();
row.block.size = 1;
row.block.position = 0;
- row.cells.push_back(Cell(0, 0));
+ row.cells.emplace_back(0, 0);
}
// Row 2
{
values[nnz++] = 3;
- bs->rows.push_back(CompressedRow());
+ bs->rows.emplace_back();
CompressedRow& row = bs->rows.back();
row.block.size = 1;
row.block.position = 1;
- row.cells.push_back(Cell(0, 1));
+ row.cells.emplace_back(0, 1);
}
// Row 3
{
values[nnz++] = 5;
- bs->rows.push_back(CompressedRow());
+ bs->rows.emplace_back();
CompressedRow& row = bs->rows.back();
row.block.size = 1;
row.block.position = 2;
- row.cells.push_back(Cell(1, 2));
+ row.cells.emplace_back(1, 2);
}
// Row 4
{
values[nnz++] = 7;
- bs->rows.push_back(CompressedRow());
+ bs->rows.emplace_back();
CompressedRow& row = bs->rows.back();
row.block.size = 1;
row.block.position = 3;
- row.cells.push_back(Cell(1, 3));
+ row.cells.emplace_back(1, 3);
}
// Row 5
{
values[nnz++] = 9;
- bs->rows.push_back(CompressedRow());
+ bs->rows.emplace_back();
CompressedRow& row = bs->rows.back();
row.block.size = 1;
row.block.position = 4;
- row.cells.push_back(Cell(1, 4));
+ row.cells.emplace_back(1, 4);
}
auto A = std::make_unique<BlockSparseMatrix>(bs);
@@ -543,20 +543,20 @@
problem->D = std::make_unique<double[]>(num_cols);
problem->num_eliminate_blocks = 1;
- CompressedRowBlockStructure* bs = new CompressedRowBlockStructure;
+ auto* bs = new CompressedRowBlockStructure;
std::unique_ptr<double[]> values =
std::make_unique<double[]>(num_rows * num_cols);
// Column block structure
- bs->cols.push_back(Block());
+ bs->cols.emplace_back();
bs->cols.back().size = 2;
bs->cols.back().position = 0;
- bs->cols.push_back(Block());
+ bs->cols.emplace_back();
bs->cols.back().size = 3;
bs->cols.back().position = 2;
- bs->cols.push_back(Block());
+ bs->cols.emplace_back();
bs->cols.back().size = 2;
bs->cols.back().position = 5;
@@ -564,18 +564,18 @@
// Row 1 & 2
{
- bs->rows.push_back(CompressedRow());
+ bs->rows.emplace_back();
CompressedRow& row = bs->rows.back();
row.block.size = 2;
row.block.position = 0;
- row.cells.push_back(Cell(0, nnz));
+ row.cells.emplace_back(0, nnz);
values[nnz++] = 1;
values[nnz++] = 2;
values[nnz++] = 1;
values[nnz++] = 4;
- row.cells.push_back(Cell(2, nnz));
+ row.cells.emplace_back(2, nnz);
values[nnz++] = 1;
values[nnz++] = 1;
values[nnz++] = 5;
@@ -584,17 +584,17 @@
// Row 3
{
- bs->rows.push_back(CompressedRow());
+ bs->rows.emplace_back();
CompressedRow& row = bs->rows.back();
row.block.size = 1;
row.block.position = 2;
- row.cells.push_back(Cell(1, nnz));
+ row.cells.emplace_back(1, nnz);
values[nnz++] = 9;
values[nnz++] = 0;
values[nnz++] = 0;
- row.cells.push_back(Cell(2, nnz));
+ row.cells.emplace_back(2, nnz);
values[nnz++] = 3;
values[nnz++] = 1;
}
diff --git a/internal/ceres/linear_least_squares_problems.h b/internal/ceres/linear_least_squares_problems.h
index 35ba246..a1f67eb 100644
--- a/internal/ceres/linear_least_squares_problems.h
+++ b/internal/ceres/linear_least_squares_problems.h
@@ -45,14 +45,14 @@
// Structure defining a linear least squares problem and if possible
// ground truth solutions. To be used by various LinearSolver tests.
struct CERES_NO_EXPORT LinearLeastSquaresProblem {
- LinearLeastSquaresProblem() : num_eliminate_blocks(0) {}
+ LinearLeastSquaresProblem() = default;
std::unique_ptr<SparseMatrix> A;
std::unique_ptr<double[]> b;
std::unique_ptr<double[]> D;
// If using the schur eliminator then how many of the variable
// blocks are e_type blocks.
- int num_eliminate_blocks;
+ int num_eliminate_blocks{0};
// Solution to min_x |Ax - b|^2
std::unique_ptr<double[]> x;
diff --git a/internal/ceres/linear_solver.h b/internal/ceres/linear_solver.h
index 962208c..2f709c2 100644
--- a/internal/ceres/linear_solver.h
+++ b/internal/ceres/linear_solver.h
@@ -286,7 +286,7 @@
// issues. Further, this calls are not expected to be frequent or
// performance sensitive.
virtual std::map<std::string, CallStatistics> Statistics() const {
- return std::map<std::string, CallStatistics>();
+ return {};
}
// Factory
diff --git a/internal/ceres/local_parameterization.cc b/internal/ceres/local_parameterization.cc
index 33e2aa0..db6f95a 100644
--- a/internal/ceres/local_parameterization.cc
+++ b/internal/ceres/local_parameterization.cc
@@ -1,5 +1,5 @@
// Ceres Solver - A fast non-linear least squares minimizer
-// Copyright 2015 Google Inc. All rights reserved.
+// Copyright 2022 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
@@ -107,8 +107,8 @@
<< "of the parameter block.";
CHECK(std::adjacent_find(constant.begin(), constant.end()) == constant.end())
<< "The set of constant parameters cannot contain duplicates";
- for (int i = 0; i < constant_parameters.size(); ++i) {
- constancy_mask_[constant_parameters[i]] = 1;
+ for (int parameter : constant_parameters) {
+ constancy_mask_[parameter] = 1;
}
}
diff --git a/internal/ceres/local_parameterization_test.cc b/internal/ceres/local_parameterization_test.cc
index cb2ad74..38dbe2b 100644
--- a/internal/ceres/local_parameterization_test.cc
+++ b/internal/ceres/local_parameterization_test.cc
@@ -1,5 +1,5 @@
// Ceres Solver - A fast non-linear least squares minimizer
-// Copyright 2015 Google Inc. All rights reserved.
+// Copyright 2022 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
@@ -332,8 +332,8 @@
Normalize<4>(x);
double delta[3] = {0.24, 0.15, 0.10};
- for (int i = 0; i < 3; ++i) {
- delta[i] = delta[i] * 1e-14;
+ for (double& delta_i : delta) {
+ delta_i = delta_i * 1e-14;
}
double q_delta[4];
@@ -411,8 +411,8 @@
x.normalize();
double delta[3] = {0.24, 0.15, 0.10};
- for (int i = 0; i < 3; ++i) {
- delta[i] = delta[i] * 1e-14;
+ for (double& delta_i : delta) {
+ delta_i = delta_i * 1e-14;
}
// Note: w is first in the constructor.
diff --git a/internal/ceres/low_rank_inverse_hessian.cc b/internal/ceres/low_rank_inverse_hessian.cc
index c73e5db..2fd1ac8 100644
--- a/internal/ceres/low_rank_inverse_hessian.cc
+++ b/internal/ceres/low_rank_inverse_hessian.cc
@@ -127,9 +127,7 @@
const int num_corrections = indices_.size();
Vector alpha(num_corrections);
- for (list<int>::const_reverse_iterator it = indices_.rbegin();
- it != indices_.rend();
- ++it) {
+ for (auto it = indices_.rbegin(); it != indices_.rend(); ++it) {
const double alpha_i = delta_x_history_.col(*it).dot(search_direction) /
delta_x_dot_delta_gradient_(*it);
search_direction -= alpha_i * delta_gradient_history_.col(*it);
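Using auto for the reverse-iterator loop above (modernize-use-auto) drops the long std::list<int>::const_reverse_iterator spelling without changing behaviour. A sketch:

#include <list>

int main() {
  const std::list<int> indices = {1, 2, 3};
  int sum = 0;
  // auto deduces std::list<int>::const_reverse_iterator here.
  for (auto it = indices.rbegin(); it != indices.rend(); ++it) {
    sum += *it;
  }
  return sum == 6 ? 0 : 1;
}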
diff --git a/internal/ceres/manifold_adapter.h b/internal/ceres/manifold_adapter.h
index 552e650..9a21456 100644
--- a/internal/ceres/manifold_adapter.h
+++ b/internal/ceres/manifold_adapter.h
@@ -11,7 +11,7 @@
// ManifoldAdapter NEVER takes ownership of local_parameterization.
class CERES_NO_EXPORT ManifoldAdapter final : public Manifold {
public:
- ManifoldAdapter(const LocalParameterization* local_parameterization)
+ explicit ManifoldAdapter(const LocalParameterization* local_parameterization)
: local_parameterization_(local_parameterization) {
CHECK(local_parameterization != nullptr);
}
diff --git a/internal/ceres/map_util.h b/internal/ceres/map_util.h
index bb6241d..5632c22 100644
--- a/internal/ceres/map_util.h
+++ b/internal/ceres/map_util.h
@@ -121,7 +121,7 @@
void InsertOrDie(Collection* const collection,
const typename Collection::value_type::first_type& key,
const typename Collection::value_type::second_type& data) {
- typedef typename Collection::value_type value_type;
+ using value_type = typename Collection::value_type;
CHECK(collection->insert(value_type(key, data)).second)
<< "duplicate key: " << key;
}
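The typedef-to-using rewrites (modernize-use-using) read left to right and also work for dependent types inside templates, as in the map_util.h hunk above. A sketch with hypothetical names:

#include <map>
#include <string>

// Plain alias: `using X = Y;` instead of `typedef Y X;`.
using Severity = int;

template <typename Collection>
void InsertOne(Collection* collection,
               const typename Collection::key_type& key,
               const typename Collection::mapped_type& value) {
  // Alias for a dependent type, previously a typedef.
  using value_type = typename Collection::value_type;
  collection->insert(value_type(key, value));
}

int main() {
  std::map<std::string, Severity> severities;
  InsertOne(&severities, std::string("warning"), 1);
  return severities.size() == 1 ? 0 : 1;
}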
diff --git a/internal/ceres/miniglog/glog/logging.h b/internal/ceres/miniglog/glog/logging.h
index 28010ec..f03c914 100644
--- a/internal/ceres/miniglog/glog/logging.h
+++ b/internal/ceres/miniglog/glog/logging.h
@@ -120,7 +120,7 @@
namespace google {
-typedef int LogSeverity;
+using LogSeverity = int;
// clang-format off
const int INFO = ::INFO;
const int WARNING = ::WARNING;
diff --git a/internal/ceres/normal_prior.cc b/internal/ceres/normal_prior.cc
index 473d05c..17de40f 100644
--- a/internal/ceres/normal_prior.cc
+++ b/internal/ceres/normal_prior.cc
@@ -1,5 +1,5 @@
// Ceres Solver - A fast non-linear least squares minimizer
-// Copyright 2015 Google Inc. All rights reserved.
+// Copyright 2022 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
diff --git a/internal/ceres/normal_prior_test.cc b/internal/ceres/normal_prior_test.cc
index 39a7eb6..c66f9d6 100644
--- a/internal/ceres/normal_prior_test.cc
+++ b/internal/ceres/normal_prior_test.cc
@@ -66,10 +66,10 @@
Matrix A(num_rows, num_cols);
RandomMatrix(&A);
- double* x = new double[num_cols];
+ auto* x = new double[num_cols];
for (int i = 0; i < num_cols; ++i) x[i] = 2 * RandDouble() - 1;
- double* jacobian = new double[num_rows * num_cols];
+ auto* jacobian = new double[num_rows * num_cols];
Vector residuals(num_rows);
NormalPrior prior(A, b);
@@ -102,7 +102,7 @@
Matrix A(num_rows, num_cols);
RandomMatrix(&A);
- double* x = new double[num_cols];
+ auto* x = new double[num_cols];
for (int i = 0; i < num_cols; ++i) x[i] = 2 * RandDouble() - 1;
double* jacobians[1];
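The auto* changes above deduce the pointer type from the new-expression, which already names the element type. A short sketch; raw new[]/delete[] is kept only because the surrounding test code uses it:

int main() {
  const int num_cols = 4;
  // auto* deduces double*; the type is already visible in `new double[...]`.
  auto* x = new double[num_cols];
  for (int i = 0; i < num_cols; ++i) x[i] = 0.5 * i;
  const double last = x[num_cols - 1];
  delete[] x;
  return last == 1.5 ? 0 : 1;
}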
diff --git a/internal/ceres/numeric_diff_test_utils.cc b/internal/ceres/numeric_diff_test_utils.cc
index d833bbb..95fde0b 100644
--- a/internal/ceres/numeric_diff_test_utils.cc
+++ b/internal/ceres/numeric_diff_test_utils.cc
@@ -1,5 +1,5 @@
// Ceres Solver - A fast non-linear least squares minimizer
-// Copyright 2015 Google Inc. All rights reserved.
+// Copyright 2022 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
@@ -149,9 +149,9 @@
};
// clang-format on
- for (int k = 0; k < kTests.size(); ++k) {
- double* x1 = &(kTests[k].x1[0]);
- double* x2 = &(kTests[k].x2[0]);
+ for (auto& test : kTests) {
+ double* x1 = &(test.x1[0]);
+ double* x2 = &(test.x2[0]);
double* parameters[] = {x1, x2};
double dydx1[10];
@@ -207,8 +207,8 @@
// Minimal tolerance w.r.t. the cost function and the tests.
const double kTolerance = 2e-14;
- for (int k = 0; k < kTests.size(); ++k) {
- double* parameters[] = {&kTests[k]};
+ for (double& test : kTests) {
+ double* parameters[] = {&test};
double dydx;
double* jacobians[1] = {&dydx};
double residual;
@@ -216,7 +216,7 @@
ASSERT_TRUE(
cost_function.Evaluate(&parameters[0], &residual, &jacobians[0]));
- double expected_result = exp(kTests[k]);
+ double expected_result = exp(test);
// Expect residual to be close to exp(x).
ExpectClose(residual, expected_result, kTolerance);
@@ -248,8 +248,8 @@
// Initialize random number generator with given seed.
srand(random_seed_);
- for (int k = 0; k < kTests.size(); ++k) {
- double* parameters[] = {&kTests[k]};
+ for (double& test : kTests) {
+ double* parameters[] = {&test};
double dydx;
double* jacobians[1] = {&dydx};
double residual;
@@ -258,10 +258,10 @@
cost_function.Evaluate(&parameters[0], &residual, &jacobians[0]));
// Expect residual to be close to x^2 w.r.t. noise factor.
- ExpectClose(residual, kTests[k] * kTests[k], noise_factor_);
+ ExpectClose(residual, test * test, noise_factor_);
// Check evaluated differences. (dy/dx = ~2x)
- ExpectClose(dydx, 2 * kTests[k], kTolerance);
+ ExpectClose(dydx, 2 * test, kTolerance);
}
}
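The loops rewritten above with modernize-loop-convert iterate over the elements directly: use a value or const reference when only reading, and a non-const reference when modifying in place. A sketch:

#include <vector>

int main() {
  std::vector<double> tests = {1.0, 2.0, 3.0};

  // Mutating loop: double& refers to each element in place.
  for (double& test : tests) {
    test *= 2.0;
  }

  // Read-only loop: a copy (or const auto&) is enough.
  double sum = 0.0;
  for (double test : tests) {
    sum += test;
  }
  return sum == 12.0 ? 0 : 1;
}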
diff --git a/internal/ceres/parameter_block.h b/internal/ceres/parameter_block.h
index ff238fb..a9845a3 100644
--- a/internal/ceres/parameter_block.h
+++ b/internal/ceres/parameter_block.h
@@ -64,7 +64,7 @@
// proper disposal of the manifold.
class CERES_NO_EXPORT ParameterBlock {
public:
- typedef std::unordered_set<ResidualBlock*> ResidualBlockSet;
+ using ResidualBlockSet = std::unordered_set<ResidualBlock*>;
// Create a parameter block with the user state, size, and index specified.
// The size is the size of the parameter block and the index is the position
diff --git a/internal/ceres/parameter_block_ordering.cc b/internal/ceres/parameter_block_ordering.cc
index 50a30c9..570a09c 100644
--- a/internal/ceres/parameter_block_ordering.cc
+++ b/internal/ceres/parameter_block_ordering.cc
@@ -1,5 +1,5 @@
// Ceres Solver - A fast non-linear least squares minimizer
-// Copyright 2015 Google Inc. All rights reserved.
+// Copyright 2022 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
@@ -59,9 +59,9 @@
const vector<ParameterBlock*>& parameter_blocks = program.parameter_blocks();
const std::unordered_set<ParameterBlock*>& vertices = graph->vertices();
- for (int i = 0; i < parameter_blocks.size(); ++i) {
- if (vertices.count(parameter_blocks[i]) > 0) {
- ordering->push_back(parameter_blocks[i]);
+ for (auto* parameter_block : parameter_blocks) {
+ if (vertices.count(parameter_block) > 0) {
+ ordering->push_back(parameter_block);
}
}
event_logger.AddEvent("Preordering");
@@ -70,8 +70,7 @@
event_logger.AddEvent("StableIndependentSet");
// Add the excluded blocks to back of the ordering vector.
- for (int i = 0; i < parameter_blocks.size(); ++i) {
- ParameterBlock* parameter_block = parameter_blocks[i];
+ for (auto* parameter_block : parameter_blocks) {
if (parameter_block->IsConstant()) {
ordering->push_back(parameter_block);
}
@@ -91,8 +90,7 @@
const vector<ParameterBlock*>& parameter_blocks = program.parameter_blocks();
// Add the excluded blocks to back of the ordering vector.
- for (int i = 0; i < parameter_blocks.size(); ++i) {
- ParameterBlock* parameter_block = parameter_blocks[i];
+ for (auto* parameter_block : parameter_blocks) {
if (parameter_block->IsConstant()) {
ordering->push_back(parameter_block);
}
@@ -129,16 +127,14 @@
auto graph = std::make_unique<Graph<ParameterBlock*>>();
CHECK(graph != nullptr);
const vector<ParameterBlock*>& parameter_blocks = program.parameter_blocks();
- for (int i = 0; i < parameter_blocks.size(); ++i) {
- ParameterBlock* parameter_block = parameter_blocks[i];
+ for (auto* parameter_block : parameter_blocks) {
if (!parameter_block->IsConstant()) {
graph->AddVertex(parameter_block);
}
}
const vector<ResidualBlock*>& residual_blocks = program.residual_blocks();
- for (int i = 0; i < residual_blocks.size(); ++i) {
- const ResidualBlock* residual_block = residual_blocks[i];
+ for (auto* residual_block : residual_blocks) {
const int num_parameter_blocks = residual_block->NumParameterBlocks();
ParameterBlock* const* parameter_blocks =
residual_block->parameter_blocks();
diff --git a/internal/ceres/parameter_block_ordering_test.cc b/internal/ceres/parameter_block_ordering_test.cc
index a64f81b..00ae5f1 100644
--- a/internal/ceres/parameter_block_ordering_test.cc
+++ b/internal/ceres/parameter_block_ordering_test.cc
@@ -46,7 +46,7 @@
namespace ceres {
namespace internal {
-typedef std::unordered_set<ParameterBlock*> VertexSet;
+using VertexSet = std::unordered_set<ParameterBlock*>;
template <int M, int... Ns>
class DummyCostFunction : public SizedCostFunction<M, Ns...> {
diff --git a/internal/ceres/parameter_block_test.cc b/internal/ceres/parameter_block_test.cc
index a532a9a..53786da 100644
--- a/internal/ceres/parameter_block_test.cc
+++ b/internal/ceres/parameter_block_test.cc
@@ -164,7 +164,7 @@
// Stops computing the plus_jacobian after the first time.
class BadManifold : public Manifold {
public:
- BadManifold() : calls_(0) {}
+ BadManifold() = default;
bool Plus(const double* x,
const double* delta,
@@ -195,7 +195,7 @@
int TangentSize() const final { return 1; }
private:
- mutable int calls_;
+ mutable int calls_{0};
};
TEST(ParameterBlock, DetectBadManifold) {
diff --git a/internal/ceres/partitioned_matrix_view_impl.h b/internal/ceres/partitioned_matrix_view_impl.h
index c66c4d7..2e818ca 100644
--- a/internal/ceres/partitioned_matrix_view_impl.h
+++ b/internal/ceres/partitioned_matrix_view_impl.h
@@ -1,5 +1,5 @@
// Ceres Solver - A fast non-linear least squares minimizer
-// Copyright 2015 Google Inc. All rights reserved.
+// Copyright 2022 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
@@ -58,8 +58,8 @@
// e_blocks. For a definition of what an e_block is, please see
// explicit_schur_complement_solver.h
num_row_blocks_e_ = 0;
- for (int r = 0; r < bs->rows.size(); ++r) {
- const std::vector<Cell>& cells = bs->rows[r].cells;
+ for (const auto& row : bs->rows) {
+ const std::vector<Cell>& cells = row.cells;
if (cells[0].block_id < num_col_blocks_e_) {
++num_row_blocks_e_;
}
@@ -142,13 +142,13 @@
const int row_block_pos = bs->rows[r].block.position;
const int row_block_size = bs->rows[r].block.size;
const std::vector<Cell>& cells = bs->rows[r].cells;
- for (int c = 0; c < cells.size(); ++c) {
- const int col_block_id = cells[c].block_id;
+ for (const auto& cell : cells) {
+ const int col_block_id = cell.block_id;
const int col_block_pos = bs->cols[col_block_id].position;
const int col_block_size = bs->cols[col_block_id].size;
// clang-format off
MatrixVectorMultiply<Eigen::Dynamic, Eigen::Dynamic, 1>(
- values + cells[c].position, row_block_size, col_block_size,
+ values + cell.position, row_block_size, col_block_size,
x + col_block_pos - num_cols_e_,
y + row_block_pos);
// clang-format on
@@ -212,13 +212,13 @@
const int row_block_pos = bs->rows[r].block.position;
const int row_block_size = bs->rows[r].block.size;
const std::vector<Cell>& cells = bs->rows[r].cells;
- for (int c = 0; c < cells.size(); ++c) {
- const int col_block_id = cells[c].block_id;
+ for (const auto& cell : cells) {
+ const int col_block_id = cell.block_id;
const int col_block_pos = bs->cols[col_block_id].position;
const int col_block_size = bs->cols[col_block_id].size;
// clang-format off
MatrixTransposeVectorMultiply<Eigen::Dynamic, Eigen::Dynamic, 1>(
- values + cells[c].position, row_block_size, col_block_size,
+ values + cell.position, row_block_size, col_block_size,
x + row_block_pos,
y + col_block_pos - num_cols_e_);
// clang-format on
@@ -237,8 +237,7 @@
CreateBlockDiagonalMatrixLayout(int start_col_block,
int end_col_block) const {
const CompressedRowBlockStructure* bs = matrix_.block_structure();
- CompressedRowBlockStructure* block_diagonal_structure =
- new CompressedRowBlockStructure;
+ auto* block_diagonal_structure = new CompressedRowBlockStructure;
int block_position = 0;
int diagonal_cell_position = 0;
@@ -247,16 +246,16 @@
// each column block.
for (int c = start_col_block; c < end_col_block; ++c) {
const Block& block = bs->cols[c];
- block_diagonal_structure->cols.push_back(Block());
+ block_diagonal_structure->cols.emplace_back();
Block& diagonal_block = block_diagonal_structure->cols.back();
diagonal_block.size = block.size;
diagonal_block.position = block_position;
- block_diagonal_structure->rows.push_back(CompressedRow());
+ block_diagonal_structure->rows.emplace_back();
CompressedRow& row = block_diagonal_structure->rows.back();
row.block = diagonal_block;
- row.cells.push_back(Cell());
+ row.cells.emplace_back();
Cell& cell = row.cells.back();
cell.block_id = c - start_col_block;
cell.position = diagonal_cell_position;
@@ -362,8 +361,8 @@
for (int r = num_row_blocks_e_; r < bs->rows.size(); ++r) {
const int row_block_size = bs->rows[r].block.size;
const std::vector<Cell>& cells = bs->rows[r].cells;
- for (int c = 0; c < cells.size(); ++c) {
- const int col_block_id = cells[c].block_id;
+ for (const auto& cell : cells) {
+ const int col_block_id = cell.block_id;
const int col_block_size = bs->cols[col_block_id].size;
const int diagonal_block_id = col_block_id - num_col_blocks_e_;
const int cell_position =
@@ -372,8 +371,8 @@
// clang-format off
MatrixTransposeMatrixMultiply
<Eigen::Dynamic, Eigen::Dynamic, Eigen::Dynamic, Eigen::Dynamic, 1>(
- values + cells[c].position, row_block_size, col_block_size,
- values + cells[c].position, row_block_size, col_block_size,
+ values + cell.position, row_block_size, col_block_size,
+ values + cell.position, row_block_size, col_block_size,
block_diagonal->mutable_values() + cell_position,
0, 0, col_block_size, col_block_size);
// clang-format on
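push_back(Cell()) builds a temporary and then moves it into the vector; emplace_back() constructs the element in place from the forwarded arguments (modernize-use-emplace). A minimal sketch, not part of the patch, using a hypothetical Cell that only mimics the shape of the Ceres one:

#include <vector>

struct Cell {
  int block_id = 0;
  int position = 0;
  Cell() = default;
  Cell(int id, int pos) : block_id(id), position(pos) {}
};

int main() {
  std::vector<Cell> cells;
  cells.push_back(Cell());    // old: construct a temporary, then move it
  cells.emplace_back();       // new: default-construct directly in the vector
  cells.emplace_back(3, 42);  // arguments are forwarded to Cell's constructor
  return static_cast<int>(cells.size());  // 3
}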
diff --git a/internal/ceres/polynomial.cc b/internal/ceres/polynomial.cc
index c263f98..96267aa 100644
--- a/internal/ceres/polynomial.cc
+++ b/internal/ceres/polynomial.cc
@@ -376,8 +376,7 @@
double* optimal_value) {
const Vector polynomial = FindInterpolatingPolynomial(samples);
MinimizePolynomial(polynomial, x_min, x_max, optimal_x, optimal_value);
- for (int i = 0; i < samples.size(); ++i) {
- const FunctionSample& sample = samples[i];
+ for (const auto& sample : samples) {
if ((sample.x < x_min) || (sample.x > x_max)) {
continue;
}
diff --git a/internal/ceres/preconditioner.h b/internal/ceres/preconditioner.h
index 7e63147..6433cc7 100644
--- a/internal/ceres/preconditioner.h
+++ b/internal/ceres/preconditioner.h
@@ -161,9 +161,9 @@
// Preconditioners that depend on access to the low level structure
// of a SparseMatrix.
// clang-format off
-typedef TypedPreconditioner<SparseMatrix> SparseMatrixPreconditioner;
-typedef TypedPreconditioner<BlockSparseMatrix> BlockSparseMatrixPreconditioner;
-typedef TypedPreconditioner<CompressedRowSparseMatrix> CompressedRowSparseMatrixPreconditioner;
+using SparseMatrixPreconditioner = TypedPreconditioner<SparseMatrix>;
+using BlockSparseMatrixPreconditioner = TypedPreconditioner<BlockSparseMatrix>;
+using CompressedRowSparseMatrixPreconditioner = TypedPreconditioner<CompressedRowSparseMatrix>;
// clang-format on
// Wrap a SparseMatrix object as a preconditioner.
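The typedef-to-using conversion (modernize-use-using) is purely syntactic: the alias name moves to the left of the '=', and only the using form extends to alias templates. A short sketch with made-up names, not taken from the Ceres headers:

#include <map>
#include <vector>

// typedef std::map<int, double> ValueMap;   // old spelling
using ValueMap = std::map<int, double>;      // new spelling, same type

// Alias templates are only expressible with 'using':
template <typename T>
using Grid = std::vector<std::vector<T>>;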
diff --git a/internal/ceres/preprocessor.h b/internal/ceres/preprocessor.h
index 8b99dd5..b5db80a 100644
--- a/internal/ceres/preprocessor.h
+++ b/internal/ceres/preprocessor.h
@@ -81,7 +81,7 @@
// A PreprocessedProblem is the result of running the Preprocessor on
// a Problem and Solver::Options object.
struct CERES_NO_EXPORT PreprocessedProblem {
- PreprocessedProblem() : fixed_cost(0.0) {}
+ PreprocessedProblem() = default;
std::string error;
Solver::Options options;
@@ -101,7 +101,7 @@
std::vector<double*> removed_parameter_blocks;
Vector reduced_parameters;
- double fixed_cost;
+ double fixed_cost{0.0};
};
// Common functions used by various preprocessors.
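Moving the initial value onto the member itself (modernize-use-default-member-init) lets the constructor collapse to '= default' (modernize-use-equals-default), and any future constructor starts from the same value automatically. A compilable sketch with a hypothetical struct shaped like the one above:

#include <string>
#include <vector>

struct PreprocessedResult {
  // Old: PreprocessedResult() : fixed_cost(0.0) {}
  PreprocessedResult() = default;

  std::string error;
  std::vector<double*> removed_parameter_blocks;
  double fixed_cost{0.0};  // in-class initializer replaces the ctor init list
};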
diff --git a/internal/ceres/problem_impl.cc b/internal/ceres/problem_impl.cc
index 27ada0f..01a22c1 100644
--- a/internal/ceres/problem_impl.cc
+++ b/internal/ceres/problem_impl.cc
@@ -1,5 +1,5 @@
// Ceres Solver - A fast non-linear least squares minimizer
-// Copyright 2021 Google Inc. All rights reserved.
+// Copyright 2022 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
@@ -127,7 +127,7 @@
<< "for a parameter with size " << size;
// Ignore the request if there is a block for the given pointer already.
- ParameterMap::iterator it = parameter_block_map_.find(values);
+ auto it = parameter_block_map_.find(values);
if (it != parameter_block_map_.end()) {
if (!options_.disable_all_safety_checks) {
int existing_size = it->second->Size();
@@ -143,11 +143,11 @@
// Before adding the parameter block, also check that it doesn't alias any
// other parameter blocks.
if (!parameter_block_map_.empty()) {
- ParameterMap::iterator lb = parameter_block_map_.lower_bound(values);
+ auto lb = parameter_block_map_.lower_bound(values);
// If lb is not the first block, check the previous block for aliasing.
if (lb != parameter_block_map_.begin()) {
- ParameterMap::iterator previous = lb;
+ auto previous = lb;
--previous;
CheckForNoAliasing(
previous->first, previous->second->Size(), values, size);
@@ -162,7 +162,7 @@
// Pass the index of the new parameter block as well to keep the index in
// sync with the position of the parameter in the program's parameter vector.
- ParameterBlock* new_parameter_block =
+ auto* new_parameter_block =
new ParameterBlock(values, size, program_->parameter_blocks_.size());
// For dynamic problems, add the list of dependent residual blocks, which is
@@ -189,7 +189,7 @@
residual_block);
}
- ResidualBlockSet::iterator it = residual_block_set_.find(residual_block);
+ auto it = residual_block_set_.find(residual_block);
residual_block_set_.erase(it);
}
DeleteBlockInVector(program_->mutable_residual_blocks(), residual_block);
@@ -204,13 +204,13 @@
// The const casts here are legit, since ResidualBlock holds these
// pointers as const pointers but we have ownership of them and
// have the right to destroy them when the destructor is called.
- CostFunction* cost_function =
+ auto* cost_function =
const_cast<CostFunction*>(residual_block->cost_function());
if (options_.cost_function_ownership == TAKE_OWNERSHIP) {
DecrementValueOrDeleteKey(cost_function, &cost_function_ref_count_);
}
- LossFunction* loss_function =
+ auto* loss_function =
const_cast<LossFunction*>(residual_block->loss_function());
if (options_.loss_function_ownership == TAKE_OWNERSHIP &&
loss_function != nullptr) {
@@ -253,8 +253,8 @@
}
// Collect the unique parameterizations and delete the parameters.
- for (int i = 0; i < program_->parameter_blocks_.size(); ++i) {
- DeleteBlock(program_->parameter_blocks_[i]);
+ for (auto* parameter_block : program_->parameter_blocks_) {
+ DeleteBlock(parameter_block);
}
// Delete the owned parameterizations.
@@ -327,7 +327,7 @@
}
}
- ResidualBlock* new_residual_block =
+ auto* new_residual_block =
new ResidualBlock(cost_function,
loss_function,
parameter_block_ptrs,
@@ -483,8 +483,8 @@
std::vector<ResidualBlock*> residual_blocks_to_remove(
parameter_block->mutable_residual_blocks()->begin(),
parameter_block->mutable_residual_blocks()->end());
- for (int i = 0; i < residual_blocks_to_remove.size(); ++i) {
- InternalRemoveResidualBlock(residual_blocks_to_remove[i]);
+ for (auto* residual_block : residual_blocks_to_remove) {
+ InternalRemoveResidualBlock(residual_block);
}
} else {
// Scan all the residual blocks to remove ones that depend on the parameter
@@ -710,8 +710,7 @@
back_inserter(excluded_parameter_blocks));
variable_parameter_blocks.reserve(excluded_parameter_blocks.size());
- for (int i = 0; i < excluded_parameter_blocks.size(); ++i) {
- ParameterBlock* parameter_block = excluded_parameter_blocks[i];
+ for (auto* parameter_block : excluded_parameter_blocks) {
if (!parameter_block->IsConstant()) {
variable_parameter_blocks.push_back(parameter_block);
parameter_block->SetConstant();
@@ -796,8 +795,8 @@
// Make the parameter blocks that were temporarily marked constant,
// variable again.
- for (int i = 0; i < variable_parameter_blocks.size(); ++i) {
- variable_parameter_blocks[i]->SetVarying();
+ for (auto* parameter_block : variable_parameter_blocks) {
+ parameter_block->SetVarying();
}
if (status) {
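Where the initializer already names the type, modernize-use-auto replaces the spelled-out iterator or pointer type with auto (and auto* for new-expressions, keeping the pointer visible). A self-contained sketch, independent of the Ceres maps:

#include <map>
#include <string>

int main() {
  std::map<double*, std::string> index;
  double x = 1.0;
  index[&x] = "x";

  // Old: std::map<double*, std::string>::iterator it = index.find(&x);
  auto it = index.find(&x);  // the type is stated once, by the initializer

  // Likewise, Widget* w = new Widget;  becomes  auto* w = new Widget;
  return it != index.end() ? 0 : 1;
}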
diff --git a/internal/ceres/problem_impl.h b/internal/ceres/problem_impl.h
index 921bb4e..22073b6 100644
--- a/internal/ceres/problem_impl.h
+++ b/internal/ceres/problem_impl.h
@@ -69,10 +69,10 @@
class CERES_NO_EXPORT ProblemImpl {
public:
- typedef std::map<double*, ParameterBlock*> ParameterMap;
- typedef std::unordered_set<ResidualBlock*> ResidualBlockSet;
- typedef std::map<CostFunction*, int> CostFunctionRefCount;
- typedef std::map<LossFunction*, int> LossFunctionRefCount;
+ using ParameterMap = std::map<double*, ParameterBlock*>;
+ using ResidualBlockSet = std::unordered_set<ResidualBlock*>;
+ using CostFunctionRefCount = std::map<CostFunction*, int>;
+ using LossFunctionRefCount = std::map<LossFunction*, int>;
ProblemImpl();
explicit ProblemImpl(const Problem::Options& options);
diff --git a/internal/ceres/problem_test.cc b/internal/ceres/problem_test.cc
index 9a73034..641dad9 100644
--- a/internal/ceres/problem_test.cc
+++ b/internal/ceres/problem_test.cc
@@ -1293,8 +1293,7 @@
// Attempt to remove a cast pointer never added as a residual.
int trash_memory = 1234;
- ResidualBlock* invalid_residual =
- reinterpret_cast<ResidualBlock*>(&trash_memory);
+ auto* invalid_residual = reinterpret_cast<ResidualBlock*>(&trash_memory);
EXPECT_DEATH_IF_SUPPORTED(problem->RemoveResidualBlock(invalid_residual),
"not found");
@@ -1363,40 +1362,40 @@
ResidualBlock* r_yzw = problem->AddResidualBlock(cost_yzw, nullptr, y, z, w);
{
- ResidualBlockId expected_residuals[] = {r_yzw, 0};
+ ResidualBlockId expected_residuals[] = {r_yzw, nullptr};
ExpectProblemHasResidualBlocks(*problem, expected_residuals);
}
ResidualBlock* r_yz = problem->AddResidualBlock(cost_yz, nullptr, y, z);
{
- ResidualBlockId expected_residuals[] = {r_yzw, r_yz, 0};
+ ResidualBlockId expected_residuals[] = {r_yzw, r_yz, nullptr};
ExpectProblemHasResidualBlocks(*problem, expected_residuals);
}
ResidualBlock* r_yw = problem->AddResidualBlock(cost_yw, nullptr, y, w);
{
- ResidualBlock *expected_residuals[] = {r_yzw, r_yz, r_yw, 0};
+ ResidualBlock *expected_residuals[] = {r_yzw, r_yz, r_yw, nullptr};
ExpectProblemHasResidualBlocks(*problem, expected_residuals);
}
ResidualBlock* r_zw = problem->AddResidualBlock(cost_zw, nullptr, z, w);
{
- ResidualBlock *expected_residuals[] = {r_yzw, r_yz, r_yw, r_zw, 0};
+ ResidualBlock *expected_residuals[] = {r_yzw, r_yz, r_yw, r_zw, nullptr};
ExpectProblemHasResidualBlocks(*problem, expected_residuals);
}
ResidualBlock* r_y = problem->AddResidualBlock(cost_y, nullptr, y);
{
- ResidualBlock *expected_residuals[] = {r_yzw, r_yz, r_yw, r_zw, r_y, 0};
+ ResidualBlock *expected_residuals[] = {r_yzw, r_yz, r_yw, r_zw, r_y, nullptr};
ExpectProblemHasResidualBlocks(*problem, expected_residuals);
}
ResidualBlock* r_z = problem->AddResidualBlock(cost_z, nullptr, z);
{
ResidualBlock *expected_residuals[] = {
- r_yzw, r_yz, r_yw, r_zw, r_y, r_z, 0
+ r_yzw, r_yz, r_yw, r_zw, r_y, r_z, nullptr
};
ExpectProblemHasResidualBlocks(*problem, expected_residuals);
}
ResidualBlock* r_w = problem->AddResidualBlock(cost_w, nullptr, w);
{
ResidualBlock *expected_residuals[] = {
- r_yzw, r_yz, r_yw, r_zw, r_y, r_z, r_w, 0
+ r_yzw, r_yz, r_yw, r_zw, r_y, r_z, r_w, nullptr
};
ExpectProblemHasResidualBlocks(*problem, expected_residuals);
}
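The expected_residuals arrays above end in a sentinel; modernize-use-nullptr swaps the literal 0 for nullptr, which has its own pointer type and cannot be mistaken for an integer. A small sketch of reading such an array, with a hypothetical stub type:

#include <cstddef>

struct ResidualStub {};

// Counts entries of a nullptr-terminated array of pointers.
std::size_t CountUntilSentinel(ResidualStub* const* blocks) {
  std::size_t n = 0;
  while (blocks[n] != nullptr) ++n;
  return n;
}

int main() {
  ResidualStub a, b;
  ResidualStub* blocks[] = {&a, &b, nullptr};  // nullptr, not 0, as sentinel
  return CountUntilSentinel(blocks) == 2 ? 0 : 1;
}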
diff --git a/internal/ceres/program.cc b/internal/ceres/program.cc
index d9e6db2..66dc90b 100644
--- a/internal/ceres/program.cc
+++ b/internal/ceres/program.cc
@@ -76,33 +76,32 @@
}
bool Program::StateVectorToParameterBlocks(const double* state) {
- for (int i = 0; i < parameter_blocks_.size(); ++i) {
- if (!parameter_blocks_[i]->IsConstant() &&
- !parameter_blocks_[i]->SetState(state)) {
+ for (auto* parameter_block : parameter_blocks_) {
+ if (!parameter_block->IsConstant() && !parameter_block->SetState(state)) {
return false;
}
- state += parameter_blocks_[i]->Size();
+ state += parameter_block->Size();
}
return true;
}
void Program::ParameterBlocksToStateVector(double* state) const {
- for (int i = 0; i < parameter_blocks_.size(); ++i) {
- parameter_blocks_[i]->GetState(state);
- state += parameter_blocks_[i]->Size();
+ for (auto* parameter_block : parameter_blocks_) {
+ parameter_block->GetState(state);
+ state += parameter_block->Size();
}
}
void Program::CopyParameterBlockStateToUserState() {
- for (int i = 0; i < parameter_blocks_.size(); ++i) {
- parameter_blocks_[i]->GetState(parameter_blocks_[i]->mutable_user_state());
+ for (auto* parameter_block : parameter_blocks_) {
+ parameter_block->GetState(parameter_block->mutable_user_state());
}
}
bool Program::SetParameterBlockStatePtrsToUserStatePtrs() {
- for (int i = 0; i < parameter_blocks_.size(); ++i) {
- if (!parameter_blocks_[i]->IsConstant() &&
- !parameter_blocks_[i]->SetState(parameter_blocks_[i]->user_state())) {
+ for (auto* parameter_block : parameter_blocks_) {
+ if (!parameter_block->IsConstant() &&
+ !parameter_block->SetState(parameter_block->user_state())) {
return false;
}
}
@@ -112,13 +111,13 @@
bool Program::Plus(const double* state,
const double* delta,
double* state_plus_delta) const {
- for (int i = 0; i < parameter_blocks_.size(); ++i) {
- if (!parameter_blocks_[i]->Plus(state, delta, state_plus_delta)) {
+ for (auto* parameter_block : parameter_blocks_) {
+ if (!parameter_block->Plus(state, delta, state_plus_delta)) {
return false;
}
- state += parameter_blocks_[i]->Size();
- delta += parameter_blocks_[i]->TangentSize();
- state_plus_delta += parameter_blocks_[i]->Size();
+ state += parameter_block->Size();
+ delta += parameter_block->TangentSize();
+ state_plus_delta += parameter_block->Size();
}
return true;
}
@@ -126,8 +125,7 @@
void Program::SetParameterOffsetsAndIndex() {
// Set positions for all parameters appearing as arguments to residuals to one
// past the end of the parameter block array.
- for (int i = 0; i < residual_blocks_.size(); ++i) {
- ResidualBlock* residual_block = residual_blocks_[i];
+ for (auto* residual_block : residual_blocks_) {
for (int j = 0; j < residual_block->NumParameterBlocks(); ++j) {
residual_block->parameter_blocks()[j]->set_index(-1);
}
@@ -176,8 +174,7 @@
bool Program::ParameterBlocksAreFinite(std::string* message) const {
CHECK(message != nullptr);
- for (int i = 0; i < parameter_blocks_.size(); ++i) {
- const ParameterBlock* parameter_block = parameter_blocks_[i];
+ for (auto* parameter_block : parameter_blocks_) {
const double* array = parameter_block->user_state();
const int size = parameter_block->Size();
const int invalid_index = FindInvalidValue(size, array);
@@ -197,8 +194,7 @@
}
bool Program::IsBoundsConstrained() const {
- for (int i = 0; i < parameter_blocks_.size(); ++i) {
- const ParameterBlock* parameter_block = parameter_blocks_[i];
+ for (auto* parameter_block : parameter_blocks_) {
if (parameter_block->IsConstant()) {
continue;
}
@@ -217,8 +213,7 @@
bool Program::IsFeasible(std::string* message) const {
CHECK(message != nullptr);
- for (int i = 0; i < parameter_blocks_.size(); ++i) {
- const ParameterBlock* parameter_block = parameter_blocks_[i];
+ for (auto* parameter_block : parameter_blocks_) {
const double* parameters = parameter_block->user_state();
const int size = parameter_block->Size();
if (parameter_block->IsConstant()) {
@@ -308,8 +303,8 @@
// Mark all the parameters as unused. Abuse the index member of the
// parameter blocks for the marking.
- for (int i = 0; i < parameter_blocks_.size(); ++i) {
- parameter_blocks_[i]->set_index(-1);
+ for (auto* parameter_block : parameter_blocks_) {
+ parameter_block->set_index(-1);
}
// Filter out residual that have all-constant parameters, and mark
@@ -381,8 +376,7 @@
// Filter out unused or fixed parameter blocks.
int num_active_parameter_blocks = 0;
removed_parameter_blocks->clear();
- for (int i = 0; i < parameter_blocks_.size(); ++i) {
- ParameterBlock* parameter_block = parameter_blocks_[i];
+ for (auto* parameter_block : parameter_blocks_) {
if (parameter_block->index() == -1) {
removed_parameter_blocks->push_back(
parameter_block->mutable_user_state());
@@ -473,24 +467,24 @@
int Program::NumResiduals() const {
int num_residuals = 0;
- for (int i = 0; i < residual_blocks_.size(); ++i) {
- num_residuals += residual_blocks_[i]->NumResiduals();
+ for (auto* residual_block : residual_blocks_) {
+ num_residuals += residual_block->NumResiduals();
}
return num_residuals;
}
int Program::NumParameters() const {
int num_parameters = 0;
- for (int i = 0; i < parameter_blocks_.size(); ++i) {
- num_parameters += parameter_blocks_[i]->Size();
+ for (auto* parameter_block : parameter_blocks_) {
+ num_parameters += parameter_block->Size();
}
return num_parameters;
}
int Program::NumEffectiveParameters() const {
int num_parameters = 0;
- for (int i = 0; i < parameter_blocks_.size(); ++i) {
- num_parameters += parameter_blocks_[i]->TangentSize();
+ for (auto* parameter_block : parameter_blocks_) {
+ num_parameters += parameter_block->TangentSize();
}
return num_parameters;
}
@@ -501,19 +495,18 @@
int Program::MaxScratchDoublesNeededForEvaluate() const {
// Compute the scratch space needed for evaluate.
int max_scratch_bytes_for_evaluate = 0;
- for (int i = 0; i < residual_blocks_.size(); ++i) {
+ for (auto* residual_block : residual_blocks_) {
max_scratch_bytes_for_evaluate =
std::max(max_scratch_bytes_for_evaluate,
- residual_blocks_[i]->NumScratchDoublesForEvaluate());
+ residual_block->NumScratchDoublesForEvaluate());
}
return max_scratch_bytes_for_evaluate;
}
int Program::MaxDerivativesPerResidualBlock() const {
int max_derivatives = 0;
- for (int i = 0; i < residual_blocks_.size(); ++i) {
+ for (auto* residual_block : residual_blocks_) {
int derivatives = 0;
- ResidualBlock* residual_block = residual_blocks_[i];
int num_parameters = residual_block->NumParameterBlocks();
for (int j = 0; j < num_parameters; ++j) {
derivatives += residual_block->NumResiduals() *
@@ -526,18 +519,17 @@
int Program::MaxParametersPerResidualBlock() const {
int max_parameters = 0;
- for (int i = 0; i < residual_blocks_.size(); ++i) {
+ for (auto* residual_block : residual_blocks_) {
max_parameters =
- std::max(max_parameters, residual_blocks_[i]->NumParameterBlocks());
+ std::max(max_parameters, residual_block->NumParameterBlocks());
}
return max_parameters;
}
int Program::MaxResidualsPerResidualBlock() const {
int max_residuals = 0;
- for (int i = 0; i < residual_blocks_.size(); ++i) {
- max_residuals =
- std::max(max_residuals, residual_blocks_[i]->NumResiduals());
+ for (auto* residual_block : residual_blocks_) {
+ max_residuals = std::max(max_residuals, residual_block->NumResiduals());
}
return max_residuals;
}
diff --git a/internal/ceres/random.h b/internal/ceres/random.h
index 14f2ebd..0495d67 100644
--- a/internal/ceres/random.h
+++ b/internal/ceres/random.h
@@ -50,7 +50,7 @@
}
inline double RandDouble() {
- double r = static_cast<double>(rand());
+ auto r = static_cast<double>(rand());
return r / RAND_MAX;
}
diff --git a/internal/ceres/reorder_program.cc b/internal/ceres/reorder_program.cc
index 31d9ae0..488ced0 100644
--- a/internal/ceres/reorder_program.cc
+++ b/internal/ceres/reorder_program.cc
@@ -1,5 +1,5 @@
// Ceres Solver - A fast non-linear least squares minimizer
-// Copyright 2015 Google Inc. All rights reserved.
+// Copyright 2022 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
@@ -88,8 +88,8 @@
#if defined(CERES_USE_EIGEN_SPARSE)
Eigen::SparseMatrix<int> CreateBlockJacobian(
const TripletSparseMatrix& block_jacobian_transpose) {
- typedef Eigen::SparseMatrix<int> SparseMatrix;
- typedef Eigen::Triplet<int> Triplet;
+ using SparseMatrix = Eigen::SparseMatrix<int>;
+ using Triplet = Eigen::Triplet<int>;
const int* rows = block_jacobian_transpose.rows();
const int* cols = block_jacobian_transpose.cols();
@@ -97,7 +97,7 @@
vector<Triplet> triplets;
triplets.reserve(num_nonzeros);
for (int i = 0; i < num_nonzeros; ++i) {
- triplets.push_back(Triplet(cols[i], rows[i], 1));
+ triplets.emplace_back(cols[i], rows[i], 1);
}
SparseMatrix block_jacobian(block_jacobian_transpose.num_cols(),
@@ -127,9 +127,9 @@
ss.ApproximateMinimumDegreeOrdering(block_jacobian_transpose, &ordering[0]);
} else {
vector<int> constraints;
- for (int i = 0; i < parameter_blocks.size(); ++i) {
+ for (auto* parameter_block : parameter_blocks) {
constraints.push_back(parameter_block_ordering.GroupId(
- parameter_blocks[i]->mutable_user_state()));
+ parameter_block->mutable_user_state()));
}
// Renumber the entries of constraints to be contiguous integers
@@ -188,7 +188,7 @@
// things. The right thing to do here would be to get a compressed
// row sparse matrix representation of the jacobian and go from
// there. But that is a project for another day.
- typedef Eigen::SparseMatrix<int> SparseMatrix;
+ using SparseMatrix = Eigen::SparseMatrix<int>;
const SparseMatrix block_jacobian =
CreateBlockJacobian(tsm_block_jacobian_transpose);
@@ -314,8 +314,8 @@
<< "to the developers.";
}
// Sanity check #2: No nullptr's left behind.
- for (int i = 0; i < reordered_residual_blocks.size(); ++i) {
- CHECK(reordered_residual_blocks[i] != nullptr)
+ for (auto* residual_block : reordered_residual_blocks) {
+ CHECK(residual_block != nullptr)
<< "Congratulations, you found a Ceres bug! Please report this error "
<< "to the developers.";
}
@@ -339,9 +339,9 @@
vector<ParameterBlock*>& parameter_blocks =
*(program->mutable_parameter_blocks());
- for (int i = 0; i < parameter_blocks.size(); ++i) {
+ for (auto* parameter_block : parameter_blocks) {
constraints.push_back(parameter_block_ordering.GroupId(
- parameter_blocks[i]->mutable_user_state()));
+ parameter_block->mutable_user_state()));
}
// Renumber the entries of constraints to be contiguous integers as
@@ -378,7 +378,7 @@
std::unique_ptr<TripletSparseMatrix> tsm_block_jacobian_transpose(
program->CreateJacobianBlockSparsityTranspose());
- typedef Eigen::SparseMatrix<int> SparseMatrix;
+ using SparseMatrix = Eigen::SparseMatrix<int>;
const SparseMatrix block_jacobian =
CreateBlockJacobian(*tsm_block_jacobian_transpose);
const int num_rows = block_jacobian.rows();
diff --git a/internal/ceres/rotation_test.cc b/internal/ceres/rotation_test.cc
index 3184376..d97e385 100644
--- a/internal/ceres/rotation_test.cc
+++ b/internal/ceres/rotation_test.cc
@@ -1,5 +1,5 @@
// Ceres Solver - A fast non-linear least squares minimizer
-// Copyright 2015 Google Inc. All rights reserved.
+// Copyright 2022 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
@@ -326,16 +326,16 @@
// Make an axis by choosing three random numbers in [-1, 1) and
// normalizing.
double norm = 0;
- for (int i = 0; i < 3; i++) {
- axis_angle[i] = RandDouble() * 2 - 1;
- norm += axis_angle[i] * axis_angle[i];
+ for (double& coeff : axis_angle) {
+ coeff = RandDouble() * 2 - 1;
+ norm += coeff * coeff;
}
norm = sqrt(norm);
// Angle in [-pi, pi).
double theta = kPi * 2 * RandDouble() - kPi;
- for (int i = 0; i < 3; i++) {
- axis_angle[i] = axis_angle[i] * theta / norm;
+ for (double& coeff : axis_angle) {
+ coeff = coeff * theta / norm;
}
double quaternion[4];
@@ -358,14 +358,14 @@
double quaternion[4];
// Choose four random numbers in [-1, 1) and normalize.
double norm = 0;
- for (int i = 0; i < 4; i++) {
- quaternion[i] = RandDouble() * 2 - 1;
- norm += quaternion[i] * quaternion[i];
+ for (double& coeff : quaternion) {
+ coeff = RandDouble() * 2 - 1;
+ norm += coeff * coeff;
}
norm = sqrt(norm);
- for (int i = 0; i < 4; i++) {
- quaternion[i] = quaternion[i] / norm;
+ for (double& coeff : quaternion) {
+ coeff = coeff / norm;
}
double axis_angle[3];
@@ -435,9 +435,9 @@
// Make an axis by choosing three random numbers in [-1, 1) and
// normalizing.
double norm = 0;
- for (int i = 0; i < 3; i++) {
- in_axis_angle[i] = RandDouble() * 2 - 1;
- norm += in_axis_angle[i] * in_axis_angle[i];
+ for (double& coeff : in_axis_angle) {
+ coeff = RandDouble() * 2 - 1;
+ norm += coeff * coeff;
}
norm = sqrt(norm);
@@ -445,8 +445,8 @@
const double kMaxSmallAngle = 1e-8;
double theta = kPi - kMaxSmallAngle * RandDouble();
- for (int i = 0; i < 3; i++) {
- in_axis_angle[i] *= (theta / norm);
+ for (double& coeff : in_axis_angle) {
+ coeff *= (theta / norm);
}
AngleAxisToRotationMatrix(in_axis_angle, matrix);
RotationMatrixToAngleAxis(matrix, out_axis_angle);
@@ -535,16 +535,16 @@
// Make an axis by choosing three random numbers in [-1, 1) and
// normalizing.
double norm = 0;
- for (int i = 0; i < 3; i++) {
- axis_angle[i] = RandDouble() * 2 - 1;
- norm += axis_angle[i] * axis_angle[i];
+ for (double& coeff : axis_angle) {
+ coeff = RandDouble() * 2 - 1;
+ norm += coeff * coeff;
}
norm = sqrt(norm);
// Angle in [-pi, pi).
double theta = kPi * 2 * RandDouble() - kPi;
- for (int i = 0; i < 3; i++) {
- axis_angle[i] = axis_angle[i] * theta / norm;
+ for (double& coeff : axis_angle) {
+ coeff = coeff * theta / norm;
}
double matrix[9];
@@ -568,16 +568,16 @@
// Make an axis by choosing three random numbers in [-1, 1) and
// normalizing.
double norm = 0;
- for (int i = 0; i < 3; i++) {
- axis_angle[i] = RandDouble() * 2 - 1;
- norm += axis_angle[i] * axis_angle[i];
+ for (double& coeff : axis_angle) {
+ coeff = RandDouble() * 2 - 1;
+ norm += coeff * coeff;
}
norm = sqrt(norm);
// Tiny theta.
double theta = 1e-16 * (kPi * 2 * RandDouble() - kPi);
- for (int i = 0; i < 3; i++) {
- axis_angle[i] = axis_angle[i] * theta / norm;
+ for (double& coeff : axis_angle) {
+ coeff = coeff * theta / norm;
}
double matrix[9];
@@ -648,8 +648,8 @@
srand(5);
for (int trial = 0; trial < kNumTrials; ++trial) {
double euler_angles_degrees[3];
- for (int i = 0; i < 3; ++i) {
- euler_angles_degrees[i] = RandDouble() * 360.0 - 180.0;
+ for (double& angle_degrees : euler_angles_degrees) {
+ angle_degrees = RandDouble() * 360.0 - 180.0;
}
double rotation_matrix[9];
EulerAnglesToRotationMatrix(euler_angles_degrees, 3, rotation_matrix);
@@ -660,8 +660,8 @@
// Tests using Jets for specific behavior involving auto differentiation
// near singularity points.
-typedef Jet<double, 3> J3;
-typedef Jet<double, 4> J4;
+using J3 = Jet<double, 3>;
+using J4 = Jet<double, 4>;
namespace {
@@ -947,8 +947,8 @@
}
const double inv_norm = theta / sqrt(norm2);
- for (int k = 0; k < 3; ++k) {
- angle_axis[k] *= inv_norm;
+ for (double& coeff : angle_axis) {
+ coeff *= inv_norm;
}
AngleAxisToRotationMatrix(angle_axis, R);
@@ -990,8 +990,8 @@
double theta = (2.0 * i * 0.0001 - 1.0) * 1e-16;
const double inv_norm = theta / sqrt(norm2);
- for (int k = 0; k < 3; ++k) {
- angle_axis[k] *= inv_norm;
+ for (double& coeff : angle_axis) {
+ coeff *= inv_norm;
}
AngleAxisToRotationMatrix(angle_axis, R);
diff --git a/internal/ceres/schur_complement_solver.cc b/internal/ceres/schur_complement_solver.cc
index 3f8cfea..bb442b4 100644
--- a/internal/ceres/schur_complement_solver.cc
+++ b/internal/ceres/schur_complement_solver.cc
@@ -210,8 +210,7 @@
summary.termination_type = LINEAR_SOLVER_SUCCESS;
summary.message = "Success.";
- BlockRandomAccessDenseMatrix* m =
- down_cast<BlockRandomAccessDenseMatrix*>(mutable_lhs());
+ auto* m = down_cast<BlockRandomAccessDenseMatrix*>(mutable_lhs());
const int num_rows = m->num_rows();
// The case where there are no f blocks, and the system is block
@@ -294,8 +293,8 @@
CHECK_GE(row.cells.front().block_id, num_eliminate_blocks);
for (int i = 0; i < row.cells.size(); ++i) {
int r_block1_id = row.cells[i].block_id - num_eliminate_blocks;
- for (int j = 0; j < row.cells.size(); ++j) {
- int r_block2_id = row.cells[j].block_id - num_eliminate_blocks;
+ for (const auto& cell : row.cells) {
+ int r_block2_id = cell.block_id - num_eliminate_blocks;
if (r_block1_id <= r_block2_id) {
block_pairs.insert(make_pair(r_block1_id, r_block2_id));
}
@@ -369,8 +368,7 @@
std::make_unique<BlockRandomAccessDiagonalMatrix>(blocks_);
}
- BlockRandomAccessSparseMatrix* sc =
- down_cast<BlockRandomAccessSparseMatrix*>(mutable_lhs());
+ auto* sc = down_cast<BlockRandomAccessSparseMatrix*>(mutable_lhs());
// Extract block diagonal from the Schur complement to construct the
// schur_jacobi preconditioner.
diff --git a/internal/ceres/schur_eliminator.h b/internal/ceres/schur_eliminator.h
index f77acbd..94cd1cc 100644
--- a/internal/ceres/schur_eliminator.h
+++ b/internal/ceres/schur_eliminator.h
@@ -273,9 +273,9 @@
// buffer_layout[z1] = 0
// buffer_layout[z5] = y1 * z1
// buffer_layout[z2] = y1 * z1 + y1 * z5
- typedef std::map<int, int> BufferLayoutType;
+ using BufferLayoutType = std::map<int, int>;
struct Chunk {
- Chunk(int start) : size(0), start(start) {}
+ explicit Chunk(int start) : size(0), start(start) {}
int size;
int start;
BufferLayoutType buffer_layout;
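google-explicit-constructor marks single-argument constructors explicit so an int can no longer convert to a Chunk silently. A sketch of the effect, using a simplified Chunk rather than the Ceres one:

// Simplified Chunk; the in-class initializer for size is a shorthand here.
struct Chunk {
  explicit Chunk(int start) : start(start) {}
  int size = 0;
  int start;
};

void Process(const Chunk& chunk);

// Without 'explicit', Process(7) would compile via an implicit int -> Chunk
// conversion; with it, the caller must write Process(Chunk(7)).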
diff --git a/internal/ceres/schur_eliminator_benchmark.cc b/internal/ceres/schur_eliminator_benchmark.cc
index 9dac07d..4e19582 100644
--- a/internal/ceres/schur_eliminator_benchmark.cc
+++ b/internal/ceres/schur_eliminator_benchmark.cc
@@ -48,7 +48,7 @@
class BenchmarkData {
public:
explicit BenchmarkData(const int num_e_blocks) {
- CompressedRowBlockStructure* bs = new CompressedRowBlockStructure;
+ auto* bs = new CompressedRowBlockStructure;
bs->cols.resize(num_e_blocks + 1);
int col_pos = 0;
for (int i = 0; i < num_e_blocks; ++i) {
diff --git a/internal/ceres/schur_eliminator_impl.h b/internal/ceres/schur_eliminator_impl.h
index 32344f5..de3ba3e 100644
--- a/internal/ceres/schur_eliminator_impl.h
+++ b/internal/ceres/schur_eliminator_impl.h
@@ -524,7 +524,7 @@
// computation of the right-hand matrix product, but memory
// references to the left hand side.
const int e_block_size = inverse_ete.rows();
- BufferLayoutType::const_iterator it1 = buffer_layout.begin();
+ auto it1 = buffer_layout.begin();
double* b1_transpose_inverse_ete =
chunk_outer_product_buffer_.get() + thread_id * buffer_size_;
@@ -541,7 +541,7 @@
b1_transpose_inverse_ete, 0, 0, block1_size, e_block_size);
// clang-format on
- BufferLayoutType::const_iterator it2 = it1;
+ auto it2 = it1;
for (; it2 != buffer_layout.end(); ++it2) {
const int block2 = it2->first - num_eliminate_blocks_;
diff --git a/internal/ceres/schur_eliminator_test.cc b/internal/ceres/schur_eliminator_test.cc
index f586476..0b20d70 100644
--- a/internal/ceres/schur_eliminator_test.cc
+++ b/internal/ceres/schur_eliminator_test.cc
@@ -228,7 +228,7 @@
constexpr int kFBlockSize = 6;
constexpr int num_e_blocks = 5;
- CompressedRowBlockStructure* bs = new CompressedRowBlockStructure;
+ auto* bs = new CompressedRowBlockStructure;
bs->cols.resize(num_e_blocks + 1);
int col_pos = 0;
for (int i = 0; i < num_e_blocks; ++i) {
diff --git a/internal/ceres/schur_jacobi_preconditioner.cc b/internal/ceres/schur_jacobi_preconditioner.cc
index ffab66f..3ecec72 100644
--- a/internal/ceres/schur_jacobi_preconditioner.cc
+++ b/internal/ceres/schur_jacobi_preconditioner.cc
@@ -43,9 +43,8 @@
namespace internal {
SchurJacobiPreconditioner::SchurJacobiPreconditioner(
- const CompressedRowBlockStructure& bs,
- const Preconditioner::Options& options)
- : options_(options) {
+ const CompressedRowBlockStructure& bs, Preconditioner::Options options)
+ : options_(std::move(options)) {
CHECK_GT(options_.elimination_groups.size(), 1);
CHECK_GT(options_.elimination_groups[0], 0);
const int num_blocks = bs.cols.size() - options_.elimination_groups[0];
diff --git a/internal/ceres/schur_jacobi_preconditioner.h b/internal/ceres/schur_jacobi_preconditioner.h
index 59f271f..a43bc33 100644
--- a/internal/ceres/schur_jacobi_preconditioner.h
+++ b/internal/ceres/schur_jacobi_preconditioner.h
@@ -84,7 +84,7 @@
// It has the same structural requirement as other Schur complement
// based solvers. Please see schur_eliminator.h for more details.
SchurJacobiPreconditioner(const CompressedRowBlockStructure& bs,
- const Preconditioner::Options& options);
+ Preconditioner::Options options);
SchurJacobiPreconditioner(const SchurJacobiPreconditioner&) = delete;
void operator=(const SchurJacobiPreconditioner&) = delete;
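Taking the options by value and moving them into the member (modernize-pass-by-value) lets callers that pass an rvalue avoid a copy, while callers passing an lvalue pay the same single copy as before. A sketch with a hypothetical options bag standing in for Preconditioner::Options:

#include <utility>
#include <vector>

struct Options {
  std::vector<int> elimination_groups;
};

class JacobiLikePreconditioner {
 public:
  // Old: explicit JacobiLikePreconditioner(const Options& options)
  //          : options_(options) {}
  explicit JacobiLikePreconditioner(Options options)
      : options_(std::move(options)) {}

 private:
  Options options_;
};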
diff --git a/internal/ceres/scoped_thread_token.h b/internal/ceres/scoped_thread_token.h
index a126412..533bfd5 100644
--- a/internal/ceres/scoped_thread_token.h
+++ b/internal/ceres/scoped_thread_token.h
@@ -41,19 +41,18 @@
// constructor and puts that token back with destruction.
class CERES_NO_EXPORT ScopedThreadToken {
public:
- ScopedThreadToken(ThreadTokenProvider* provider)
+ explicit ScopedThreadToken(ThreadTokenProvider* provider)
: provider_(provider), token_(provider->Acquire()) {}
~ScopedThreadToken() { provider_->Release(token_); }
+ ScopedThreadToken(ScopedThreadToken&) = delete;
+ ScopedThreadToken& operator=(ScopedThreadToken&) = delete;
int token() const { return token_; }
private:
ThreadTokenProvider* provider_;
int token_;
-
- ScopedThreadToken(ScopedThreadToken&);
- ScopedThreadToken& operator=(ScopedThreadToken&);
};
} // namespace internal
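The old idiom of declaring the copy operations private and never defining them only fails at link time; '= delete' (modernize-use-equals-delete) rejects copies at compile time and documents the intent in the public interface. A sketch (the patch keeps the original non-const reference signatures; the conventional const form is used below):

class TokenHolder {
 public:
  explicit TokenHolder(int token) : token_(token) {}

  // Deleted copy operations: any attempt to copy is a compile-time error.
  TokenHolder(const TokenHolder&) = delete;
  TokenHolder& operator=(const TokenHolder&) = delete;

  int token() const { return token_; }

 private:
  int token_;
};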
diff --git a/internal/ceres/small_blas.h b/internal/ceres/small_blas.h
index 856a2a2..1cf41a5 100644
--- a/internal/ceres/small_blas.h
+++ b/internal/ceres/small_blas.h
@@ -210,7 +210,7 @@
// Process the couple columns in remainder if present.
if (NUM_COL_C & 2) {
- int col = NUM_COL_C & (int)(~(span - 1));
+ int col = NUM_COL_C & (~(span - 1));
const double* pa = &A[0];
for (int row = 0; row < NUM_ROW_C; ++row, pa += NUM_COL_A) {
const double* pb = &B[col];
@@ -232,7 +232,7 @@
}
// Calculate the main part with multiples of 4.
- int col_m = NUM_COL_C & (int)(~(span - 1));
+ int col_m = NUM_COL_C & (~(span - 1));
for (int col = 0; col < col_m; col += span) {
for (int row = 0; row < NUM_ROW_C; ++row) {
const int index = (row + start_row_c) * col_stride_c + start_col_c + col;
@@ -315,7 +315,7 @@
// Process the couple columns in remainder if present.
if (NUM_COL_C & 2) {
- int col = NUM_COL_C & (int)(~(span - 1));
+ int col = NUM_COL_C & (~(span - 1));
for (int row = 0; row < NUM_ROW_C; ++row) {
const double* pa = &A[row];
const double* pb = &B[col];
@@ -339,7 +339,7 @@
}
// Process the main part with multiples of 4.
- int col_m = NUM_COL_C & (int)(~(span - 1));
+ int col_m = NUM_COL_C & (~(span - 1));
for (int col = 0; col < col_m; col += span) {
for (int row = 0; row < NUM_ROW_C; ++row) {
const int index = (row + start_row_c) * col_stride_c + start_col_c + col;
@@ -435,7 +435,7 @@
// Process the couple rows in remainder if present.
if (NUM_ROW_A & 2) {
- int row = NUM_ROW_A & (int)(~(span - 1));
+ int row = NUM_ROW_A & (~(span - 1));
const double* pa1 = &A[row * NUM_COL_A];
const double* pa2 = pa1 + NUM_COL_A;
const double* pb = &b[0];
@@ -454,7 +454,7 @@
}
// Calculate the main part with multiples of 4.
- int row_m = NUM_ROW_A & (int)(~(span - 1));
+ int row_m = NUM_ROW_A & (~(span - 1));
for (int row = 0; row < row_m; row += span) {
// clang-format off
MVM_mat4x1(NUM_COL_A, &A[row * NUM_COL_A], NUM_COL_A,
@@ -522,7 +522,7 @@
// Process the couple columns in remainder if present.
if (NUM_COL_A & 2) {
- int row = NUM_COL_A & (int)(~(span - 1));
+ int row = NUM_COL_A & (~(span - 1));
const double* pa = &A[row];
const double* pb = &b[0];
double tmp1 = 0.0, tmp2 = 0.0;
@@ -543,7 +543,7 @@
}
// Calculate the main part with multiples of 4.
- int row_m = NUM_COL_A & (int)(~(span - 1));
+ int row_m = NUM_COL_A & (~(span - 1));
for (int row = 0; row < row_m; row += span) {
// clang-format off
MTV_mat4x1(NUM_ROW_A, &A[row], NUM_COL_A,
diff --git a/internal/ceres/solver_test.cc b/internal/ceres/solver_test.cc
index 3ecde20..7b34bf2 100644
--- a/internal/ceres/solver_test.cc
+++ b/internal/ceres/solver_test.cc
@@ -1,5 +1,5 @@
// Ceres Solver - A fast non-linear least squares minimizer
-// Copyright 2019 Google Inc. All rights reserved.
+// Copyright 2022 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
@@ -118,8 +118,8 @@
num_iterations =
summary.num_successful_steps + summary.num_unsuccessful_steps;
EXPECT_GT(num_iterations, 1);
- for (int i = 0; i < callback.x_values.size(); ++i) {
- EXPECT_EQ(50.0, callback.x_values[i]);
+ for (double value : callback.x_values) {
+ EXPECT_EQ(50.0, value);
}
// Second: update_state_every_iteration=true, evaluation_callback=nullptr.
diff --git a/internal/ceres/sparse_cholesky_test.cc b/internal/ceres/sparse_cholesky_test.cc
index 363f991..9422bf5 100644
--- a/internal/ceres/sparse_cholesky_test.cc
+++ b/internal/ceres/sparse_cholesky_test.cc
@@ -147,8 +147,8 @@
<< eigen_lhs;
}
-typedef ::testing::tuple<SparseLinearAlgebraLibraryType, OrderingType, bool>
- Param;
+using Param =
+ ::testing::tuple<SparseLinearAlgebraLibraryType, OrderingType, bool>;
std::string ParamInfoToString(testing::TestParamInfo<Param> info) {
Param param = info.param;
@@ -267,8 +267,8 @@
using testing::Return;
TEST(RefinedSparseCholesky, StorageType) {
- MockSparseCholesky* mock_sparse_cholesky = new MockSparseCholesky;
- MockIterativeRefiner* mock_iterative_refiner = new MockIterativeRefiner;
+ auto* mock_sparse_cholesky = new MockSparseCholesky;
+ auto* mock_iterative_refiner = new MockIterativeRefiner;
EXPECT_CALL(*mock_sparse_cholesky, StorageType())
.Times(1)
.WillRepeatedly(Return(CompressedRowSparseMatrix::UPPER_TRIANGULAR));
@@ -282,8 +282,8 @@
};
TEST(RefinedSparseCholesky, Factorize) {
- MockSparseCholesky* mock_sparse_cholesky = new MockSparseCholesky;
- MockIterativeRefiner* mock_iterative_refiner = new MockIterativeRefiner;
+ auto* mock_sparse_cholesky = new MockSparseCholesky;
+ auto* mock_iterative_refiner = new MockIterativeRefiner;
EXPECT_CALL(*mock_sparse_cholesky, Factorize(_, _))
.Times(1)
.WillRepeatedly(Return(LINEAR_SOLVER_SUCCESS));
@@ -299,8 +299,8 @@
};
TEST(RefinedSparseCholesky, FactorAndSolveWithUnsuccessfulFactorization) {
- MockSparseCholesky* mock_sparse_cholesky = new MockSparseCholesky;
- MockIterativeRefiner* mock_iterative_refiner = new MockIterativeRefiner;
+ auto* mock_sparse_cholesky = new MockSparseCholesky;
+ auto* mock_iterative_refiner = new MockIterativeRefiner;
EXPECT_CALL(*mock_sparse_cholesky, Factorize(_, _))
.Times(1)
.WillRepeatedly(Return(LINEAR_SOLVER_FAILURE));
@@ -320,7 +320,7 @@
};
TEST(RefinedSparseCholesky, FactorAndSolveWithSuccess) {
- MockSparseCholesky* mock_sparse_cholesky = new MockSparseCholesky;
+ auto* mock_sparse_cholesky = new MockSparseCholesky;
std::unique_ptr<MockIterativeRefiner> mock_iterative_refiner(
new MockIterativeRefiner);
EXPECT_CALL(*mock_sparse_cholesky, Factorize(_, _))
diff --git a/internal/ceres/subset_preconditioner.cc b/internal/ceres/subset_preconditioner.cc
index b6b0c2e..221530c 100644
--- a/internal/ceres/subset_preconditioner.cc
+++ b/internal/ceres/subset_preconditioner.cc
@@ -32,6 +32,7 @@
#include <memory>
#include <string>
+#include <utility>
#include "ceres/compressed_row_sparse_matrix.h"
#include "ceres/inner_product_computer.h"
@@ -42,9 +43,9 @@
namespace ceres {
namespace internal {
-SubsetPreconditioner::SubsetPreconditioner(
- const Preconditioner::Options& options, const BlockSparseMatrix& A)
- : options_(options), num_cols_(A.num_cols()) {
+SubsetPreconditioner::SubsetPreconditioner(Preconditioner::Options options,
+ const BlockSparseMatrix& A)
+ : options_(std::move(options)), num_cols_(A.num_cols()) {
CHECK_GE(options_.subset_preconditioner_start_row_block, 0)
<< "Congratulations, you found a bug in Ceres. Please report it.";
@@ -66,7 +67,7 @@
bool SubsetPreconditioner::UpdateImpl(const BlockSparseMatrix& A,
const double* D) {
- BlockSparseMatrix* m = const_cast<BlockSparseMatrix*>(&A);
+ auto* m = const_cast<BlockSparseMatrix*>(&A);
const CompressedRowBlockStructure* bs = m->block_structure();
// A = [P]
diff --git a/internal/ceres/subset_preconditioner.h b/internal/ceres/subset_preconditioner.h
index 7b6c317..6d07995 100644
--- a/internal/ceres/subset_preconditioner.h
+++ b/internal/ceres/subset_preconditioner.h
@@ -71,7 +71,7 @@
class CERES_NO_EXPORT SubsetPreconditioner
: public BlockSparseMatrixPreconditioner {
public:
- SubsetPreconditioner(const Preconditioner::Options& options,
+ SubsetPreconditioner(Preconditioner::Options options,
const BlockSparseMatrix& A);
~SubsetPreconditioner() override;
diff --git a/internal/ceres/subset_preconditioner_test.cc b/internal/ceres/subset_preconditioner_test.cc
index 7d606f1..27e819b 100644
--- a/internal/ceres/subset_preconditioner_test.cc
+++ b/internal/ceres/subset_preconditioner_test.cc
@@ -75,7 +75,7 @@
dense_triangular_lhs, rhs, solution);
}
-typedef ::testing::tuple<SparseLinearAlgebraLibraryType, bool> Param;
+using Param = ::testing::tuple<SparseLinearAlgebraLibraryType, bool>;
std::string ParamInfoToString(testing::TestParamInfo<Param> info) {
Param param = info.param;
diff --git a/internal/ceres/suitesparse.h b/internal/ceres/suitesparse.h
index 60b415c..3f62e7c 100644
--- a/internal/ceres/suitesparse.h
+++ b/internal/ceres/suitesparse.h
@@ -305,7 +305,7 @@
std::string* message) final;
private:
- SuiteSparseCholesky(const OrderingType ordering_type);
+ explicit SuiteSparseCholesky(const OrderingType ordering_type);
const OrderingType ordering_type_;
SuiteSparse ss_;
diff --git a/internal/ceres/system_test.cc b/internal/ceres/system_test.cc
index 3f4450a..c270def 100644
--- a/internal/ceres/system_test.cc
+++ b/internal/ceres/system_test.cc
@@ -139,7 +139,7 @@
double PowellsFunction::kResidualTolerance = 1e-8;
-typedef SystemTest<PowellsFunction> PowellTest;
+using PowellTest = SystemTest<PowellsFunction>;
TEST_F(PowellTest, DenseQR) {
PowellsFunction powells_function;
diff --git a/internal/ceres/thread_pool.cc b/internal/ceres/thread_pool.cc
index b503537..57f01af 100644
--- a/internal/ceres/thread_pool.cc
+++ b/internal/ceres/thread_pool.cc
@@ -83,7 +83,7 @@
GetNumAllowedThreads(num_threads) - num_current_threads;
for (int i = 0; i < create_num_threads; ++i) {
- thread_pool_.push_back(std::thread(&ThreadPool::ThreadMainLoop, this));
+ thread_pool_.emplace_back(&ThreadPool::ThreadMainLoop, this);
}
}
diff --git a/internal/ceres/thread_token_provider.h b/internal/ceres/thread_token_provider.h
index cd9f58f..918c687 100644
--- a/internal/ceres/thread_token_provider.h
+++ b/internal/ceres/thread_token_provider.h
@@ -68,7 +68,7 @@
//
class CERES_NO_EXPORT ThreadTokenProvider {
public:
- ThreadTokenProvider(int num_threads);
+ explicit ThreadTokenProvider(int num_threads);
// Returns the first token from the queue. The acquired value must be
// given back by Release().
diff --git a/internal/ceres/tiny_solver_autodiff_function_test.cc b/internal/ceres/tiny_solver_autodiff_function_test.cc
index 2598188..36863df 100644
--- a/internal/ceres/tiny_solver_autodiff_function_test.cc
+++ b/internal/ceres/tiny_solver_autodiff_function_test.cc
@@ -60,8 +60,8 @@
static double const kTolerance = std::numeric_limits<double>::epsilon() * 10;
TEST(TinySolverAutoDiffFunction, SimpleFunction) {
- typedef TinySolverAutoDiffFunction<AutoDiffTestFunctor, 2, 3>
- AutoDiffTestFunction;
+ using AutoDiffTestFunction =
+ TinySolverAutoDiffFunction<AutoDiffTestFunctor, 2, 3>;
AutoDiffTestFunctor autodiff_test_functor;
AutoDiffTestFunction f(autodiff_test_functor);
@@ -97,7 +97,7 @@
class DynamicResidualsFunctor {
public:
- typedef double Scalar;
+ using Scalar = double;
enum {
NUM_RESIDUALS = Eigen::Dynamic,
NUM_PARAMETERS = 3,
@@ -140,7 +140,7 @@
EXPECT_GT(residuals.squaredNorm() / 2.0, 1e-10);
TinySolver<AutoDiffCostFunctor> solver;
- solver.Solve(f, &x0);
+ solver.Solve(f_autodiff, &x0);
EXPECT_NEAR(0.0, solver.summary.final_cost, 1e-10);
}
diff --git a/internal/ceres/tiny_solver_cost_function_adapter_test.cc b/internal/ceres/tiny_solver_cost_function_adapter_test.cc
index ff8c070..c5d7a02 100644
--- a/internal/ceres/tiny_solver_cost_function_adapter_test.cc
+++ b/internal/ceres/tiny_solver_cost_function_adapter_test.cc
@@ -68,8 +68,8 @@
template <int kNumResiduals, int kNumParameters>
void TestHelper() {
std::unique_ptr<CostFunction> cost_function(new CostFunction2x3);
- typedef TinySolverCostFunctionAdapter<kNumResiduals, kNumParameters>
- CostFunctionAdapter;
+ using CostFunctionAdapter =
+ TinySolverCostFunctionAdapter<kNumResiduals, kNumParameters>;
CostFunctionAdapter cfa(*cost_function);
EXPECT_EQ(CostFunctionAdapter::NUM_RESIDUALS, kNumResiduals);
EXPECT_EQ(CostFunctionAdapter::NUM_PARAMETERS, kNumParameters);
diff --git a/internal/ceres/tiny_solver_test.cc b/internal/ceres/tiny_solver_test.cc
index 45c021f..500ac0d 100644
--- a/internal/ceres/tiny_solver_test.cc
+++ b/internal/ceres/tiny_solver_test.cc
@@ -39,13 +39,13 @@
namespace ceres {
-typedef Eigen::Matrix<double, 2, 1> Vec2;
-typedef Eigen::Matrix<double, 3, 1> Vec3;
-typedef Eigen::VectorXd VecX;
+using Vec2 = Eigen::Matrix<double, 2, 1>;
+using Vec3 = Eigen::Matrix<double, 3, 1>;
+using VecX = Eigen::VectorXd;
class ExampleStatic {
public:
- typedef double Scalar;
+ using Scalar = double;
enum {
// Can also be Eigen::Dynamic.
NUM_RESIDUALS = 2,
@@ -60,7 +60,7 @@
class ExampleParametersDynamic {
public:
- typedef double Scalar;
+ using Scalar = double;
enum {
NUM_RESIDUALS = 2,
NUM_PARAMETERS = Eigen::Dynamic,
@@ -77,7 +77,7 @@
class ExampleResidualsDynamic {
public:
- typedef double Scalar;
+ using Scalar = double;
enum {
NUM_RESIDUALS = Eigen::Dynamic,
NUM_PARAMETERS = 3,
@@ -94,7 +94,7 @@
class ExampleAllDynamic {
public:
- typedef double Scalar;
+ using Scalar = double;
enum {
NUM_RESIDUALS = Eigen::Dynamic,
NUM_PARAMETERS = Eigen::Dynamic,
diff --git a/internal/ceres/triplet_sparse_matrix.cc b/internal/ceres/triplet_sparse_matrix.cc
index ad81fd2..bbb5f67 100644
--- a/internal/ceres/triplet_sparse_matrix.cc
+++ b/internal/ceres/triplet_sparse_matrix.cc
@@ -109,8 +109,9 @@
for (int i = 0; i < num_nonzeros_; ++i) {
// clang-format off
if ((rows_[i] < 0) || (rows_[i] >= num_rows_) ||
- (cols_[i] < 0) || (cols_[i] >= num_cols_))
+ (cols_[i] < 0) || (cols_[i] >= num_cols_)) {
return false;
+ }
// clang-format on
}
return true;
diff --git a/internal/ceres/triplet_sparse_matrix.h b/internal/ceres/triplet_sparse_matrix.h
index bcb4529..065c690 100644
--- a/internal/ceres/triplet_sparse_matrix.h
+++ b/internal/ceres/triplet_sparse_matrix.h
@@ -57,7 +57,7 @@
const std::vector<int>& cols,
const std::vector<double>& values);
- explicit TripletSparseMatrix(const TripletSparseMatrix& orig);
+ TripletSparseMatrix(const TripletSparseMatrix& orig);
TripletSparseMatrix& operator=(const TripletSparseMatrix& rhs);
diff --git a/internal/ceres/trust_region_preprocessor.cc b/internal/ceres/trust_region_preprocessor.cc
index 9892e1e..edba47d 100644
--- a/internal/ceres/trust_region_preprocessor.cc
+++ b/internal/ceres/trust_region_preprocessor.cc
@@ -60,9 +60,9 @@
std::shared_ptr<ParameterBlockOrdering> ordering =
std::make_shared<ParameterBlockOrdering>();
const vector<ParameterBlock*>& parameter_blocks = program.parameter_blocks();
- for (int i = 0; i < parameter_blocks.size(); ++i) {
+ for (auto* parameter_block : parameter_blocks) {
ordering->AddElementToGroup(
- const_cast<double*>(parameter_blocks[i]->user_state()), 0);
+ const_cast<double*>(parameter_block->user_state()), 0);
}
return ordering;
}
diff --git a/internal/ceres/visibility.cc b/internal/ceres/visibility.cc
index aa84a9f..0867733 100644
--- a/internal/ceres/visibility.cc
+++ b/internal/ceres/visibility.cc
@@ -1,5 +1,5 @@
// Ceres Solver - A fast non-linear least squares minimizer
-// Copyright 2015 Google Inc. All rights reserved.
+// Copyright 2022 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
@@ -63,8 +63,8 @@
visibility->resize(0);
visibility->resize(block_structure.cols.size() - num_eliminate_blocks);
- for (int i = 0; i < block_structure.rows.size(); ++i) {
- const vector<Cell>& cells = block_structure.rows[i].cells;
+ for (const auto& row : block_structure.rows) {
+ const vector<Cell>& cells = row.cells;
int block_id = cells[0].block_id;
// If the first block is not an e_block, then skip this row block.
if (block_id >= num_eliminate_blocks) {
@@ -87,9 +87,9 @@
// set for each e_block/camera contains the set of e_blocks/points
// visible to it, we find the maximum across all visibility sets.
int num_points = 0;
- for (int i = 0; i < visibility.size(); i++) {
- if (visibility[i].size() > 0) {
- num_points = max(num_points, (*visibility[i].rbegin()) + 1);
+ for (const auto& visible : visibility) {
+ if (visible.size() > 0) {
+ num_points = max(num_points, (*visible.rbegin()) + 1);
}
}
@@ -101,7 +101,7 @@
vector<set<int>> inverse_visibility(num_points);
for (int i = 0; i < visibility.size(); i++) {
const set<int>& visibility_set = visibility[i];
- for (const int v : visibility_set) {
+ for (int v : visibility_set) {
inverse_visibility[v].insert(i);
}
}
@@ -112,10 +112,10 @@
// Count the number of points visible to each camera/f_block pair.
for (const auto& inverse_visibility_set : inverse_visibility) {
- for (set<int>::const_iterator camera1 = inverse_visibility_set.begin();
+ for (auto camera1 = inverse_visibility_set.begin();
camera1 != inverse_visibility_set.end();
++camera1) {
- set<int>::const_iterator camera2 = camera1;
+ auto camera2 = camera1;
for (++camera2; camera2 != inverse_visibility_set.end(); ++camera2) {
++(camera_pairs[make_pair(*camera1, *camera2)]);
}
diff --git a/internal/ceres/visibility_based_preconditioner.cc b/internal/ceres/visibility_based_preconditioner.cc
index 9419e6b..831a866 100644
--- a/internal/ceres/visibility_based_preconditioner.cc
+++ b/internal/ceres/visibility_based_preconditioner.cc
@@ -1,5 +1,5 @@
// Ceres Solver - A fast non-linear least squares minimizer
-// Copyright 2015 Google Inc. All rights reserved.
+// Copyright 2022 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
@@ -70,9 +70,8 @@
static constexpr double kSingleLinkageMinSimilarity = 0.9;
VisibilityBasedPreconditioner::VisibilityBasedPreconditioner(
- const CompressedRowBlockStructure& bs,
- const Preconditioner::Options& options)
- : options_(options), num_blocks_(0), num_clusters_(0) {
+ const CompressedRowBlockStructure& bs, Preconditioner::Options options)
+ : options_(std::move(options)), num_blocks_(0), num_clusters_(0) {
CHECK_GT(options_.elimination_groups.size(), 1);
CHECK_GT(options_.elimination_groups[0], 0);
CHECK(options_.type == CLUSTER_JACOBI || options_.type == CLUSTER_TRIDIAGONAL)
@@ -283,14 +282,12 @@
}
}
- for (set<int>::const_iterator block1 = f_blocks.begin();
- block1 != f_blocks.end();
- ++block1) {
- set<int>::const_iterator block2 = block1;
+ for (auto block1 = f_blocks.begin(); block1 != f_blocks.end(); ++block1) {
+ auto block2 = block1;
++block2;
for (; block2 != f_blocks.end(); ++block2) {
if (IsBlockPairInPreconditioner(*block1, *block2)) {
- block_pairs_.insert(make_pair(*block1, *block2));
+ block_pairs_.emplace(*block1, *block2);
}
}
}
@@ -302,8 +299,8 @@
CHECK_GE(row.cells.front().block_id, num_eliminate_blocks);
for (int i = 0; i < row.cells.size(); ++i) {
const int block1 = row.cells[i].block_id - num_eliminate_blocks;
- for (int j = 0; j < row.cells.size(); ++j) {
- const int block2 = row.cells[j].block_id - num_eliminate_blocks;
+ for (const auto& cell : row.cells) {
+ const int block2 = cell.block_id - num_eliminate_blocks;
if (block1 <= block2) {
if (IsBlockPairInPreconditioner(block1, block2)) {
block_pairs_.insert(make_pair(block1, block2));
diff --git a/internal/ceres/visibility_based_preconditioner.h b/internal/ceres/visibility_based_preconditioner.h
index 04d87ce..8079dc3 100644
--- a/internal/ceres/visibility_based_preconditioner.h
+++ b/internal/ceres/visibility_based_preconditioner.h
@@ -134,7 +134,7 @@
// It has the same structural requirement as other Schur complement
// based solvers. Please see schur_eliminator.h for more details.
VisibilityBasedPreconditioner(const CompressedRowBlockStructure& bs,
- const Preconditioner::Options& options);
+ Preconditioner::Options options);
VisibilityBasedPreconditioner(const VisibilityBasedPreconditioner&) = delete;
void operator=(const VisibilityBasedPreconditioner&) = delete;
diff --git a/internal/ceres/visibility_test.cc b/internal/ceres/visibility_test.cc
index a199963..218bf6e 100644
--- a/internal/ceres/visibility_test.cc
+++ b/internal/ceres/visibility_test.cc
@@ -60,50 +60,50 @@
// Row 1
{
- bs.rows.push_back(CompressedRow());
+ bs.rows.emplace_back();
CompressedRow& row = bs.rows.back();
row.block.size = 2;
row.block.position = 0;
- row.cells.push_back(Cell(0, 0));
- row.cells.push_back(Cell(5, 0));
+ row.cells.emplace_back(0, 0);
+ row.cells.emplace_back(5, 0);
}
// Row 2
{
- bs.rows.push_back(CompressedRow());
+ bs.rows.emplace_back();
CompressedRow& row = bs.rows.back();
row.block.size = 2;
row.block.position = 2;
- row.cells.push_back(Cell(0, 1));
- row.cells.push_back(Cell(3, 1));
+ row.cells.emplace_back(0, 1);
+ row.cells.emplace_back(3, 1);
}
// Row 3
{
- bs.rows.push_back(CompressedRow());
+ bs.rows.emplace_back();
CompressedRow& row = bs.rows.back();
row.block.size = 2;
row.block.position = 4;
- row.cells.push_back(Cell(1, 2));
- row.cells.push_back(Cell(2, 2));
+ row.cells.emplace_back(1, 2);
+ row.cells.emplace_back(2, 2);
}
// Row 4
{
- bs.rows.push_back(CompressedRow());
+ bs.rows.emplace_back();
CompressedRow& row = bs.rows.back();
row.block.size = 2;
row.block.position = 6;
- row.cells.push_back(Cell(1, 3));
- row.cells.push_back(Cell(4, 3));
+ row.cells.emplace_back(1, 3);
+ row.cells.emplace_back(4, 3);
}
bs.cols.resize(num_cols);
vector<set<int>> visibility;
ComputeVisibility(bs, num_eliminate_blocks, &visibility);
ASSERT_EQ(visibility.size(), num_cols - num_eliminate_blocks);
- for (int i = 0; i < visibility.size(); ++i) {
- ASSERT_EQ(visibility[i].size(), 1);
+ for (const auto& visible : visibility) {
+ ASSERT_EQ(visible.size(), 1);
}
std::unique_ptr<WeightedGraph<int>> graph(
@@ -139,46 +139,46 @@
// Row 1
{
- bs.rows.push_back(CompressedRow());
+ bs.rows.emplace_back();
CompressedRow& row = bs.rows.back();
row.block.size = 2;
row.block.position = 0;
- row.cells.push_back(Cell(0, 0));
+ row.cells.emplace_back(0, 0);
}
// Row 2
{
- bs.rows.push_back(CompressedRow());
+ bs.rows.emplace_back();
CompressedRow& row = bs.rows.back();
row.block.size = 2;
row.block.position = 2;
- row.cells.push_back(Cell(0, 1));
+ row.cells.emplace_back(0, 1);
}
// Row 3
{
- bs.rows.push_back(CompressedRow());
+ bs.rows.emplace_back();
CompressedRow& row = bs.rows.back();
row.block.size = 2;
row.block.position = 4;
- row.cells.push_back(Cell(1, 2));
+ row.cells.emplace_back(1, 2);
}
// Row 4
{
- bs.rows.push_back(CompressedRow());
+ bs.rows.emplace_back();
CompressedRow& row = bs.rows.back();
row.block.size = 2;
row.block.position = 6;
- row.cells.push_back(Cell(1, 3));
+ row.cells.emplace_back(1, 3);
}
bs.cols.resize(num_cols);
vector<set<int>> visibility;
ComputeVisibility(bs, num_eliminate_blocks, &visibility);
ASSERT_EQ(visibility.size(), num_cols - num_eliminate_blocks);
- for (int i = 0; i < visibility.size(); ++i) {
- ASSERT_EQ(visibility[i].size(), 0);
+ for (const auto& visible : visibility) {
+ ASSERT_EQ(visible.size(), 0);
}
std::unique_ptr<WeightedGraph<int>> graph(