&foo[0] -> foo.data()
Use the more modern form of accessing the data array of a vector
rather than taking the address of the first element. The latter is
undefined behavior when the vector is empty (operator[] with an
out-of-range index), whereas data() is well-defined for a vector of
any size, including zero.
Change-Id: Ifc8fc969b06b3ba1a9385e8a3a8d5c50b25db5a8
diff --git a/internal/ceres/accelerate_sparse.cc b/internal/ceres/accelerate_sparse.cc
index b01414d..4331c29 100644
--- a/internal/ceres/accelerate_sparse.cc
+++ b/internal/ceres/accelerate_sparse.cc
@@ -114,12 +114,12 @@
// Accelerate's columnStarts is a long*, not an int*. These types might be
// different (e.g. ARM on iOS) so always make a copy.
column_starts_.resize(A->num_rows() + 1); // +1 for final column length.
- std::copy_n(A->rows(), column_starts_.size(), &column_starts_[0]);
+ std::copy_n(A->rows(), column_starts_.size(), column_starts_.data());
ASSparseMatrix At;
At.structure.rowCount = A->num_cols();
At.structure.columnCount = A->num_rows();
- At.structure.columnStarts = &column_starts_[0];
+ At.structure.columnStarts = column_starts_.data();
At.structure.rowIndices = A->mutable_cols();
At.structure.attributes.transpose = false;
At.structure.attributes.triangle = SparseUpperTriangle;
diff --git a/internal/ceres/block_jacobian_writer.cc b/internal/ceres/block_jacobian_writer.cc
index 727d649..d7e1e95 100644
--- a/internal/ceres/block_jacobian_writer.cc
+++ b/internal/ceres/block_jacobian_writer.cc
@@ -94,7 +94,7 @@
jacobian_layout_storage->resize(num_jacobian_blocks);
int e_block_pos = 0;
- int* jacobian_pos = &(*jacobian_layout_storage)[0];
+ int* jacobian_pos = jacobian_layout_storage->data();
for (int i = 0; i < residual_blocks.size(); ++i) {
const ResidualBlock* residual_block = residual_blocks[i];
const int num_residuals = residual_block->NumResiduals();
@@ -144,7 +144,8 @@
auto preparers = std::make_unique<BlockEvaluatePreparer[]>(num_threads);
for (unsigned i = 0; i < num_threads; i++) {
- preparers[i].Init(&jacobian_layout_[0], max_derivatives_per_residual_block);
+ preparers[i].Init(jacobian_layout_.data(),
+ max_derivatives_per_residual_block);
}
return preparers;
}
diff --git a/internal/ceres/compressed_row_sparse_matrix.h b/internal/ceres/compressed_row_sparse_matrix.h
index d164bef..d98f61f 100644
--- a/internal/ceres/compressed_row_sparse_matrix.h
+++ b/internal/ceres/compressed_row_sparse_matrix.h
@@ -110,8 +110,8 @@
int num_rows() const final { return num_rows_; }
int num_cols() const final { return num_cols_; }
int num_nonzeros() const final { return rows_[num_rows_]; }
- const double* values() const final { return &values_[0]; }
- double* mutable_values() final { return &values_[0]; }
+ const double* values() const final { return values_.data(); }
+ double* mutable_values() final { return values_.data(); }
// Delete the bottom delta_rows.
// num_rows -= delta_rows
@@ -133,11 +133,11 @@
void set_num_cols(const int num_cols) { num_cols_ = num_cols; }
// Low level access methods that expose the structure of the matrix.
- const int* cols() const { return &cols_[0]; }
- int* mutable_cols() { return &cols_[0]; }
+ const int* cols() const { return cols_.data(); }
+ int* mutable_cols() { return cols_.data(); }
- const int* rows() const { return &rows_[0]; }
- int* mutable_rows() { return &rows_[0]; }
+ const int* rows() const { return rows_.data(); }
+ int* mutable_rows() { return rows_.data(); }
StorageType storage_type() const { return storage_type_; }
void set_storage_type(const StorageType storage_type) {
diff --git a/internal/ceres/covariance_impl.cc b/internal/ceres/covariance_impl.cc
index d6ce69b..bc4e75f 100644
--- a/internal/ceres/covariance_impl.cc
+++ b/internal/ceres/covariance_impl.cc
@@ -600,9 +600,9 @@
cholmod_jacobian.ncol = num_cols;
cholmod_jacobian.nzmax = num_nonzeros;
cholmod_jacobian.nz = nullptr;
- cholmod_jacobian.p = reinterpret_cast<void*>(&transpose_rows[0]);
- cholmod_jacobian.i = reinterpret_cast<void*>(&transpose_cols[0]);
- cholmod_jacobian.x = reinterpret_cast<void*>(&transpose_values[0]);
+ cholmod_jacobian.p = reinterpret_cast<void*>(transpose_rows.data());
+ cholmod_jacobian.i = reinterpret_cast<void*>(transpose_cols.data());
+ cholmod_jacobian.x = reinterpret_cast<void*>(transpose_values.data());
cholmod_jacobian.z = nullptr;
cholmod_jacobian.stype = 0; // Matrix is not symmetric.
cholmod_jacobian.itype = CHOLMOD_LONG;
diff --git a/internal/ceres/inner_product_computer.cc b/internal/ceres/inner_product_computer.cc
index a75cbe5..f2d5457 100644
--- a/internal/ceres/inner_product_computer.cc
+++ b/internal/ceres/inner_product_computer.cc
@@ -255,7 +255,7 @@
int nnz = 0;
// Process the first term.
- const InnerProductComputer::ProductTerm* current = &product_terms[0];
+ const InnerProductComputer::ProductTerm* current = product_terms.data();
FILL_CRSM_COL_BLOCK;
// Process the rest of the terms.
diff --git a/internal/ceres/reorder_program.cc b/internal/ceres/reorder_program.cc
index eb37dc3..912fe3d 100644
--- a/internal/ceres/reorder_program.cc
+++ b/internal/ceres/reorder_program.cc
@@ -129,7 +129,7 @@
if (parameter_block_ordering.NumGroups() <= 1) {
// The user did not supply a useful ordering so just go ahead
// and use AMD.
- ss.Ordering(block_jacobian_transpose, OrderingType::AMD, &ordering[0]);
+ ss.Ordering(block_jacobian_transpose, OrderingType::AMD, ordering);
} else {
// The user supplied an ordering, so use CAMD.
vector<int> constraints;
@@ -142,9 +142,9 @@
// Renumber the entries of constraints to be contiguous integers
// as CAMD requires that the group ids be in the range [0,
// parameter_blocks.size() - 1].
- MapValuesToContiguousRange(constraints.size(), &constraints[0]);
+ MapValuesToContiguousRange(constraints.size(), constraints.data());
ss.ConstrainedApproximateMinimumDegreeOrdering(
- block_jacobian_transpose, &constraints[0], ordering);
+ block_jacobian_transpose, constraints.data(), ordering);
}
} else if (linear_solver_ordering_type == ceres::NESDIS) {
// If nested dissection is chosen as an ordering algorithm, then
@@ -152,7 +152,7 @@
CHECK(SuiteSparse::IsNestedDissectionAvailable())
<< "Congratulations, you found a Ceres bug! "
<< "Please report this error to the developers.";
- ss.Ordering(block_jacobian_transpose, OrderingType::NESDIS, &ordering[0]);
+ ss.Ordering(block_jacobian_transpose, OrderingType::NESDIS, ordering);
} else {
LOG(FATAL) << "Congratulations, you found a Ceres bug! "
<< "Please report this error to the developers.";
@@ -344,7 +344,7 @@
// Renumber the entries of constraints to be contiguous integers as
// CAMD requires that the group ids be in the range [0,
// parameter_blocks.size() - 1].
- MapValuesToContiguousRange(constraints.size(), &constraints[0]);
+ MapValuesToContiguousRange(constraints.size(), constraints.data());
// Compute a block sparse presentation of J'.
std::unique_ptr<TripletSparseMatrix> tsm_block_jacobian_transpose(
@@ -355,7 +355,7 @@
vector<int> ordering(parameter_blocks.size(), 0);
ss.ConstrainedApproximateMinimumDegreeOrdering(
- block_jacobian_transpose, &constraints[0], &ordering[0]);
+ block_jacobian_transpose, constraints.data(), ordering.data());
ss.Free(block_jacobian_transpose);
const vector<ParameterBlock*> parameter_blocks_copy(parameter_blocks);
@@ -551,7 +551,7 @@
*tsm_block_jacobian_transpose,
parameter_blocks,
parameter_block_ordering,
- &ordering[0]);
+ ordering.data());
} else if (sparse_linear_algebra_library_type == ACCELERATE_SPARSE) {
// Accelerate does not provide a function to perform reordering without
// performing a full symbolic factorisation. As such, we have nothing
@@ -565,7 +565,7 @@
OrderingForSparseNormalCholeskyUsingEigenSparse(
linear_solver_ordering_type,
*tsm_block_jacobian_transpose,
- &ordering[0]);
+ ordering.data());
}
// Apply ordering.
diff --git a/internal/ceres/suitesparse.cc b/internal/ceres/suitesparse.cc
index 85fc5b2..3f091b9 100644
--- a/internal/ceres/suitesparse.cc
+++ b/internal/ceres/suitesparse.cc
@@ -189,7 +189,7 @@
cc_.nmethods = 1;
cc_.method[0].ordering = CHOLMOD_GIVEN;
cholmod_factor* factor =
- cholmod_analyze_p(A, const_cast<int*>(&ordering[0]), nullptr, 0, &cc_);
+ cholmod_analyze_p(A, const_cast<int*>(ordering.data()), nullptr, 0, &cc_);
if (cc_.status != CHOLMOD_OK) {
*message =
@@ -236,8 +236,8 @@
block_matrix.nrow = num_row_blocks;
block_matrix.ncol = num_col_blocks;
block_matrix.nzmax = block_rows.size();
- block_matrix.p = reinterpret_cast<void*>(&block_cols[0]);
- block_matrix.i = reinterpret_cast<void*>(&block_rows[0]);
+ block_matrix.p = reinterpret_cast<void*>(block_cols.data());
+ block_matrix.i = reinterpret_cast<void*>(block_rows.data());
block_matrix.x = nullptr;
block_matrix.stype = A->stype;
block_matrix.itype = CHOLMOD_INT;