LinearOperator::FooMultiply -> LinearOperator::FooMultiplyAndAccumulate

These methods were historically poorly named and every time I read code
I get confused whether they are just multiplying or multiplying and
adding. Clarifying them also gives us the chance to introduce
RightMultiply and LeftMultiply methods in the base class which will
simplify a number of call sites in a subsequent CL.

Fixes https://github.com/ceres-solver/ceres-solver/issues/855

Change-Id: Ice4fb483f1acd02527a6dd753ef0c5a66037f4b0
diff --git a/internal/ceres/block_jacobi_preconditioner.cc b/internal/ceres/block_jacobi_preconditioner.cc
index 07d6cd3..fdba2b8 100644
--- a/internal/ceres/block_jacobi_preconditioner.cc
+++ b/internal/ceres/block_jacobi_preconditioner.cc
@@ -90,9 +90,9 @@
   return true;
 }
 
-void BlockJacobiPreconditioner::RightMultiply(const double* x,
-                                              double* y) const {
-  m_->RightMultiply(x, y);
+void BlockJacobiPreconditioner::RightMultiplyAndAccumulate(const double* x,
+                                                           double* y) const {
+  m_->RightMultiplyAndAccumulate(x, y);
 }
 
 }  // namespace ceres::internal
diff --git a/internal/ceres/block_jacobi_preconditioner.h b/internal/ceres/block_jacobi_preconditioner.h
index 0928919..7728eb9 100644
--- a/internal/ceres/block_jacobi_preconditioner.h
+++ b/internal/ceres/block_jacobi_preconditioner.h
@@ -64,7 +64,7 @@
   ~BlockJacobiPreconditioner() override;
 
   // Preconditioner interface
-  void RightMultiply(const double* x, double* y) const final;
+  void RightMultiplyAndAccumulate(const double* x, double* y) const final;
   int num_rows() const final { return m_->num_rows(); }
   int num_cols() const final { return m_->num_rows(); }
   const BlockRandomAccessDiagonalMatrix& matrix() const { return *m_; }
diff --git a/internal/ceres/block_random_access_diagonal_matrix.cc b/internal/ceres/block_random_access_diagonal_matrix.cc
index 5a9f772..006713f 100644
--- a/internal/ceres/block_random_access_diagonal_matrix.cc
+++ b/internal/ceres/block_random_access_diagonal_matrix.cc
@@ -130,8 +130,8 @@
   }
 }
 
-void BlockRandomAccessDiagonalMatrix::RightMultiply(const double* x,
-                                                    double* y) const {
+void BlockRandomAccessDiagonalMatrix::RightMultiplyAndAccumulate(
+    const double* x, double* y) const {
   CHECK(x != nullptr);
   CHECK(y != nullptr);
   const double* values = tsm_->values();
diff --git a/internal/ceres/block_random_access_diagonal_matrix.h b/internal/ceres/block_random_access_diagonal_matrix.h
index 45e3d02..2a726a0 100644
--- a/internal/ceres/block_random_access_diagonal_matrix.h
+++ b/internal/ceres/block_random_access_diagonal_matrix.h
@@ -75,7 +75,7 @@
   void Invert();
 
   // y += S * x
-  void RightMultiply(const double* x, double* y) const;
+  void RightMultiplyAndAccumulate(const double* x, double* y) const;
 
   // Since the matrix is square, num_rows() == num_cols().
   int num_rows() const final { return tsm_->num_rows(); }
diff --git a/internal/ceres/block_random_access_diagonal_matrix_test.cc b/internal/ceres/block_random_access_diagonal_matrix_test.cc
index 42a309f..37e1f88 100644
--- a/internal/ceres/block_random_access_diagonal_matrix_test.cc
+++ b/internal/ceres/block_random_access_diagonal_matrix_test.cc
@@ -134,7 +134,7 @@
       kTolerance);
 }
 
-TEST_F(BlockRandomAccessDiagonalMatrixTest, RightMultiply) {
+TEST_F(BlockRandomAccessDiagonalMatrixTest, RightMultiplyAndAccumulate) {
   double kTolerance = 1e-14;
   const TripletSparseMatrix* tsm = m_->matrix();
   Matrix dense;
@@ -142,7 +142,7 @@
   Vector x = Vector::Random(dense.rows());
   Vector expected_y = dense * x;
   Vector actual_y = Vector::Zero(dense.rows());
-  m_->RightMultiply(x.data(), actual_y.data());
+  m_->RightMultiplyAndAccumulate(x.data(), actual_y.data());
   EXPECT_NEAR((expected_y - actual_y).norm(), 0, kTolerance);
 }
 
diff --git a/internal/ceres/block_random_access_sparse_matrix.cc b/internal/ceres/block_random_access_sparse_matrix.cc
index 3ae0cba..2df4c71 100644
--- a/internal/ceres/block_random_access_sparse_matrix.cc
+++ b/internal/ceres/block_random_access_sparse_matrix.cc
@@ -147,8 +147,8 @@
   }
 }
 
-void BlockRandomAccessSparseMatrix::SymmetricRightMultiply(const double* x,
-                                                           double* y) const {
+void BlockRandomAccessSparseMatrix::SymmetricRightMultiplyAndAccumulate(
+    const double* x, double* y) const {
   for (const auto& cell_position_and_data : cell_values_) {
     const int row = cell_position_and_data.first.first;
     const int row_block_size = blocks_[row];
diff --git a/internal/ceres/block_random_access_sparse_matrix.h b/internal/ceres/block_random_access_sparse_matrix.h
index 882292c..fe2b13c 100644
--- a/internal/ceres/block_random_access_sparse_matrix.h
+++ b/internal/ceres/block_random_access_sparse_matrix.h
@@ -83,7 +83,7 @@
   // matrix is stored.
   //
   // y += S * x
-  void SymmetricRightMultiply(const double* x, double* y) const;
+  void SymmetricRightMultiplyAndAccumulate(const double* x, double* y) const;
 
   // Since the matrix is square, num_rows() == num_cols().
   int num_rows() const final { return tsm_->num_rows(); }
diff --git a/internal/ceres/block_random_access_sparse_matrix_test.cc b/internal/ceres/block_random_access_sparse_matrix_test.cc
index 7224b65..605bfbe 100644
--- a/internal/ceres/block_random_access_sparse_matrix_test.cc
+++ b/internal/ceres/block_random_access_sparse_matrix_test.cc
@@ -127,7 +127,7 @@
   Vector expected_y = Vector::Zero(dense.rows());
 
   expected_y += dense.selfadjointView<Eigen::Upper>() * x;
-  m.SymmetricRightMultiply(x.data(), actual_y.data());
+  m.SymmetricRightMultiplyAndAccumulate(x.data(), actual_y.data());
   EXPECT_NEAR((expected_y - actual_y).norm(), 0.0, kTolerance)
       << "actual: " << actual_y.transpose() << "\n"
       << "expected: " << expected_y.transpose() << "matrix: \n " << dense;
diff --git a/internal/ceres/block_sparse_matrix.cc b/internal/ceres/block_sparse_matrix.cc
index 1bfa343..ae6bd3a 100644
--- a/internal/ceres/block_sparse_matrix.cc
+++ b/internal/ceres/block_sparse_matrix.cc
@@ -88,7 +88,8 @@
   std::fill(values_.get(), values_.get() + num_nonzeros_, 0.0);
 }
 
-void BlockSparseMatrix::RightMultiply(const double* x, double* y) const {
+void BlockSparseMatrix::RightMultiplyAndAccumulate(const double* x,
+                                                   double* y) const {
   CHECK(x != nullptr);
   CHECK(y != nullptr);
 
@@ -110,7 +111,8 @@
   }
 }
 
-void BlockSparseMatrix::LeftMultiply(const double* x, double* y) const {
+void BlockSparseMatrix::LeftMultiplyAndAccumulate(const double* x,
+                                                  double* y) const {
   CHECK(x != nullptr);
   CHECK(y != nullptr);
 
diff --git a/internal/ceres/block_sparse_matrix.h b/internal/ceres/block_sparse_matrix.h
index da6b641..7cef18d 100644
--- a/internal/ceres/block_sparse_matrix.h
+++ b/internal/ceres/block_sparse_matrix.h
@@ -71,8 +71,8 @@
 
   // Implementation of SparseMatrix interface.
   void SetZero() final;
-  void RightMultiply(const double* x, double* y) const final;
-  void LeftMultiply(const double* x, double* y) const final;
+  void RightMultiplyAndAccumulate(const double* x, double* y) const final;
+  void LeftMultiplyAndAccumulate(const double* x, double* y) const final;
   void SquaredColumnNorm(double* x) const final;
   void ScaleColumns(const double* scale) final;
   void ToCRSMatrix(CRSMatrix* matrix) const;
diff --git a/internal/ceres/block_sparse_matrix_test.cc b/internal/ceres/block_sparse_matrix_test.cc
index 7fab13a..4b02abf 100644
--- a/internal/ceres/block_sparse_matrix_test.cc
+++ b/internal/ceres/block_sparse_matrix_test.cc
@@ -148,26 +148,26 @@
   EXPECT_EQ(13, A_->num_nonzeros());
 }
 
-TEST_F(BlockSparseMatrixTest, RightMultiplyTest) {
+TEST_F(BlockSparseMatrixTest, RightMultiplyAndAccumulateTest) {
   Vector y_a = Vector::Zero(A_->num_rows());
   Vector y_b = Vector::Zero(A_->num_rows());
   for (int i = 0; i < A_->num_cols(); ++i) {
     Vector x = Vector::Zero(A_->num_cols());
     x[i] = 1.0;
-    A_->RightMultiply(x.data(), y_a.data());
-    B_->RightMultiply(x.data(), y_b.data());
+    A_->RightMultiplyAndAccumulate(x.data(), y_a.data());
+    B_->RightMultiplyAndAccumulate(x.data(), y_b.data());
     EXPECT_LT((y_a - y_b).norm(), 1e-12);
   }
 }
 
-TEST_F(BlockSparseMatrixTest, LeftMultiplyTest) {
+TEST_F(BlockSparseMatrixTest, LeftMultiplyAndAccumulateTest) {
   Vector y_a = Vector::Zero(A_->num_cols());
   Vector y_b = Vector::Zero(A_->num_cols());
   for (int i = 0; i < A_->num_rows(); ++i) {
     Vector x = Vector::Zero(A_->num_rows());
     x[i] = 1.0;
-    A_->LeftMultiply(x.data(), y_a.data());
-    B_->LeftMultiply(x.data(), y_b.data());
+    A_->LeftMultiplyAndAccumulate(x.data(), y_a.data());
+    B_->LeftMultiplyAndAccumulate(x.data(), y_b.data());
     EXPECT_LT((y_a - y_b).norm(), 1e-12);
   }
 }
@@ -210,8 +210,8 @@
     y_a.setZero();
     y_b.setZero();
 
-    A_->RightMultiply(x.data(), y_a.data());
-    B_->RightMultiply(x.data(), y_b.data());
+    A_->RightMultiplyAndAccumulate(x.data(), y_a.data());
+    B_->RightMultiplyAndAccumulate(x.data(), y_b.data());
     EXPECT_LT((y_a - y_b).norm(), 1e-12);
   }
 }
@@ -237,8 +237,8 @@
     y_a.setZero();
     y_b.setZero();
 
-    A_->RightMultiply(x.data(), y_a.data());
-    B_->RightMultiply(x.data(), y_b.data());
+    A_->RightMultiplyAndAccumulate(x.data(), y_a.data());
+    B_->RightMultiplyAndAccumulate(x.data(), y_b.data());
     EXPECT_LT((y_a.head(B_->num_rows()) - y_b.head(B_->num_rows())).norm(),
               1e-12);
     Vector expected_tail = Vector::Zero(A_->num_cols());
@@ -258,8 +258,8 @@
     y_a.setZero();
     y_b.setZero();
 
-    A_->RightMultiply(x.data(), y_a.data());
-    B_->RightMultiply(x.data(), y_b.data());
+    A_->RightMultiplyAndAccumulate(x.data(), y_a.data());
+    B_->RightMultiplyAndAccumulate(x.data(), y_b.data());
     EXPECT_LT((y_a - y_b).norm(), 1e-12);
   }
 }
@@ -287,7 +287,7 @@
   EXPECT_EQ(m->num_rows(), m->num_cols());
   Vector x = Vector::Ones(num_cols);
   Vector y = Vector::Zero(num_cols);
-  m->RightMultiply(x.data(), y.data());
+  m->RightMultiplyAndAccumulate(x.data(), y.data());
   for (int i = 0; i < num_cols; ++i) {
     EXPECT_NEAR(y[i], diagonal[i], std::numeric_limits<double>::epsilon());
   }
diff --git a/internal/ceres/cgnr_solver.cc b/internal/ceres/cgnr_solver.cc
index f79b897..99d5375 100644
--- a/internal/ceres/cgnr_solver.cc
+++ b/internal/ceres/cgnr_solver.cc
@@ -85,12 +85,12 @@
   CgnrLinearOperator(const LinearOperator& A, const double* D)
       : A_(A), D_(D), z_(Vector::Zero(A.num_rows())) {}
 
-  void RightMultiply(const Vector& x, Vector& y) final {
+  void RightMultiplyAndAccumulate(const Vector& x, Vector& y) final {
     // z = Ax
     // y = y + Atz
     z_.setZero();
-    A_.RightMultiply(x, z_);
-    A_.LeftMultiply(z_, y);
+    A_.RightMultiplyAndAccumulate(x, z_);
+    A_.LeftMultiplyAndAccumulate(z_, y);
 
     // y = y + DtDx
     if (D_ != nullptr) {
@@ -159,7 +159,7 @@
   // rhs = Atb.
   Vector rhs(A->num_cols());
   rhs.setZero();
-  A->LeftMultiply(b, rhs.data());
+  A->LeftMultiplyAndAccumulate(b, rhs.data());
 
   cg_solution_ = Vector::Zero(A->num_cols());
   for (int i = 0; i < 4; ++i) {
diff --git a/internal/ceres/compressed_row_sparse_matrix.cc b/internal/ceres/compressed_row_sparse_matrix.cc
index 9c78565..574b0c0 100644
--- a/internal/ceres/compressed_row_sparse_matrix.cc
+++ b/internal/ceres/compressed_row_sparse_matrix.cc
@@ -278,10 +278,10 @@
   std::fill(values_.begin(), values_.end(), 0);
 }
 
-// TODO(sameeragarwal): Make RightMultiply and LeftMultiply
-// block-aware for higher performance.
-void CompressedRowSparseMatrix::RightMultiply(const double* x,
-                                              double* y) const {
+// TODO(sameeragarwal): Make RightMultiplyAndAccumulate and
+// LeftMultiplyAndAccumulate block-aware for higher performance.
+void CompressedRowSparseMatrix::RightMultiplyAndAccumulate(const double* x,
+                                                           double* y) const {
   CHECK(x != nullptr);
   CHECK(y != nullptr);
 
@@ -342,7 +342,8 @@
   }
 }
 
-void CompressedRowSparseMatrix::LeftMultiply(const double* x, double* y) const {
+void CompressedRowSparseMatrix::LeftMultiplyAndAccumulate(const double* x,
+                                                          double* y) const {
   CHECK(x != nullptr);
   CHECK(y != nullptr);
 
@@ -353,8 +354,9 @@
       }
     }
   } else {
-    // Since the matrix is symmetric, LeftMultiply = RightMultiply.
-    RightMultiply(x, y);
+    // Since the matrix is symmetric, LeftMultiplyAndAccumulate =
+    // RightMultiplyAndAccumulate.
+    RightMultiplyAndAccumulate(x, y);
   }
 }
 
diff --git a/internal/ceres/compressed_row_sparse_matrix.h b/internal/ceres/compressed_row_sparse_matrix.h
index 1d1ac95..2580045 100644
--- a/internal/ceres/compressed_row_sparse_matrix.h
+++ b/internal/ceres/compressed_row_sparse_matrix.h
@@ -100,8 +100,8 @@
   // SparseMatrix interface.
   ~CompressedRowSparseMatrix() override;
   void SetZero() final;
-  void RightMultiply(const double* x, double* y) const final;
-  void LeftMultiply(const double* x, double* y) const final;
+  void RightMultiplyAndAccumulate(const double* x, double* y) const final;
+  void LeftMultiplyAndAccumulate(const double* x, double* y) const final;
   void SquaredColumnNorm(double* x) const final;
   void ScaleColumns(const double* scale) final;
   void ToDenseMatrix(Matrix* dense_matrix) const final;
diff --git a/internal/ceres/compressed_row_sparse_matrix_test.cc b/internal/ceres/compressed_row_sparse_matrix_test.cc
index 0f1b948..42f5498 100644
--- a/internal/ceres/compressed_row_sparse_matrix_test.cc
+++ b/internal/ceres/compressed_row_sparse_matrix_test.cc
@@ -64,8 +64,8 @@
     Vector y_a = Vector::Zero(num_rows);
     Vector y_b = Vector::Zero(num_rows);
 
-    a->RightMultiply(x.data(), y_a.data());
-    b->RightMultiply(x.data(), y_b.data());
+    a->RightMultiplyAndAccumulate(x.data(), y_a.data());
+    b->RightMultiplyAndAccumulate(x.data(), y_b.data());
     EXPECT_EQ((y_a - y_b).norm(), 0);
   }
 }
@@ -237,13 +237,13 @@
 
   x.setOnes();
   y.setZero();
-  matrix->RightMultiply(x.data(), y.data());
+  matrix->RightMultiplyAndAccumulate(x.data(), y.data());
   for (int i = 0; i < diagonal.size(); ++i) {
     EXPECT_EQ(y[i], diagonal[i]);
   }
 
   y.setZero();
-  matrix->LeftMultiply(x.data(), y.data());
+  matrix->LeftMultiplyAndAccumulate(x.data(), y.data());
   for (int i = 0; i < diagonal.size(); ++i) {
     EXPECT_EQ(y[i], diagonal[i]);
   }
@@ -396,9 +396,10 @@
   return "UNSYMMETRIC";
 }
 
-class RightMultiplyTest : public ::testing::TestWithParam<Param> {};
+class RightMultiplyAndAccumulateTest : public ::testing::TestWithParam<Param> {
+};
 
-TEST_P(RightMultiplyTest, _) {
+TEST_P(RightMultiplyAndAccumulateTest, _) {
   const int kMinNumBlocks = 1;
   const int kMaxNumBlocks = 10;
   const int kMinBlockSize = 1;
@@ -429,7 +430,7 @@
 
       Vector actual_y(num_rows);
       actual_y.setZero();
-      matrix->RightMultiply(x.data(), actual_y.data());
+      matrix->RightMultiplyAndAccumulate(x.data(), actual_y.data());
 
       Matrix dense;
       matrix->ToDenseMatrix(&dense);
@@ -460,15 +461,15 @@
 
 INSTANTIATE_TEST_SUITE_P(
     CompressedRowSparseMatrix,
-    RightMultiplyTest,
+    RightMultiplyAndAccumulateTest,
     ::testing::Values(CompressedRowSparseMatrix::StorageType::LOWER_TRIANGULAR,
                       CompressedRowSparseMatrix::StorageType::UPPER_TRIANGULAR,
                       CompressedRowSparseMatrix::StorageType::UNSYMMETRIC),
     ParamInfoToString);
 
-class LeftMultiplyTest : public ::testing::TestWithParam<Param> {};
+class LeftMultiplyAndAccumulateTest : public ::testing::TestWithParam<Param> {};
 
-TEST_P(LeftMultiplyTest, _) {
+TEST_P(LeftMultiplyAndAccumulateTest, _) {
   const int kMinNumBlocks = 1;
   const int kMaxNumBlocks = 10;
   const int kMinBlockSize = 1;
@@ -499,7 +500,7 @@
 
       Vector actual_y(num_cols);
       actual_y.setZero();
-      matrix->LeftMultiply(x.data(), actual_y.data());
+      matrix->LeftMultiplyAndAccumulate(x.data(), actual_y.data());
 
       Matrix dense;
       matrix->ToDenseMatrix(&dense);
@@ -530,7 +531,7 @@
 
 INSTANTIATE_TEST_SUITE_P(
     CompressedRowSparseMatrix,
-    LeftMultiplyTest,
+    LeftMultiplyAndAccumulateTest,
     ::testing::Values(CompressedRowSparseMatrix::StorageType::LOWER_TRIANGULAR,
                       CompressedRowSparseMatrix::StorageType::UPPER_TRIANGULAR,
                       CompressedRowSparseMatrix::StorageType::UNSYMMETRIC),
diff --git a/internal/ceres/conjugate_gradients_solver.h b/internal/ceres/conjugate_gradients_solver.h
index 6254d2c..93f9e25 100644
--- a/internal/ceres/conjugate_gradients_solver.h
+++ b/internal/ceres/conjugate_gradients_solver.h
@@ -55,7 +55,8 @@
 class ConjugateGradientsLinearOperator {
  public:
   ~ConjugateGradientsLinearOperator() = default;
-  virtual void RightMultiply(const DenseVectorType& x, DenseVectorType& y) = 0;
+  virtual void RightMultiplyAndAccumulate(const DenseVectorType& x,
+                                          DenseVectorType& y) = 0;
 };
 
 // Adapter class that makes LinearOperator appear like an instance of
@@ -65,8 +66,8 @@
   LinearOperatorAdapter(LinearOperator& linear_operator)
       : linear_operator_(linear_operator) {}
 
-  void RightMultiply(const Vector& x, Vector& y) final {
-    linear_operator_.RightMultiply(x, y);
+  void RightMultiplyAndAccumulate(const Vector& x, Vector& y) final {
+    linear_operator_.RightMultiplyAndAccumulate(x, y);
   }
 
  private:
@@ -135,7 +136,7 @@
   const double tol_r = options.r_tolerance * norm_b;
 
   SetZero(tmp);
-  lhs.RightMultiply(solution, tmp);
+  lhs.RightMultiplyAndAccumulate(solution, tmp);
 
   // r = rhs - tmp
   Axpby(1.0, rhs, -1.0, tmp, r);
@@ -157,7 +158,7 @@
 
   for (summary.num_iterations = 1;; ++summary.num_iterations) {
     SetZero(z);
-    preconditioner.RightMultiply(r, z);
+    preconditioner.RightMultiplyAndAccumulate(r, z);
 
     double last_rho = rho;
     // rho = r.dot(z);
@@ -188,7 +189,7 @@
 
     DenseVectorType& q = z;
     SetZero(q);
-    lhs.RightMultiply(p, q);
+    lhs.RightMultiplyAndAccumulate(p, q);
     const double pq = Dot(p, q);
     if ((pq <= 0) || std::isinf(pq)) {
       summary.termination_type = LinearSolverTerminationType::NO_CONVERGENCE;
@@ -224,7 +225,7 @@
     // double the complexity of the CG algorithm.
     if (summary.num_iterations % options.residual_reset_period == 0) {
       SetZero(tmp);
-      lhs.RightMultiply(solution, tmp);
+      lhs.RightMultiplyAndAccumulate(solution, tmp);
       Axpby(1.0, rhs, -1.0, tmp, r);
       // r = rhs - tmp;
     } else {
diff --git a/internal/ceres/context_impl.h b/internal/ceres/context_impl.h
index 3324b52..d4bd436 100644
--- a/internal/ceres/context_impl.h
+++ b/internal/ceres/context_impl.h
@@ -45,8 +45,8 @@
 #ifndef CERES_NO_CUDA
 #include "cublas_v2.h"
 #include "cuda_runtime.h"
-#include "cusparse.h"
 #include "cusolverDn.h"
+#include "cusparse.h"
 #endif  // CERES_NO_CUDA
 
 #ifdef CERES_USE_CXX_THREADS
diff --git a/internal/ceres/cuda_buffer.h b/internal/ceres/cuda_buffer.h
index f8abf13..dba1706 100644
--- a/internal/ceres/cuda_buffer.h
+++ b/internal/ceres/cuda_buffer.h
@@ -66,8 +66,9 @@
       if (data_ != nullptr) {
         CHECK_EQ(cudaFree(data_), cudaSuccess);
       }
-      CHECK_EQ(cudaMalloc(&data_, size * sizeof(T)), cudaSuccess) <<
-          "Failed to allocate " << size * sizeof(T) << " bytes of GPU memory";
+      CHECK_EQ(cudaMalloc(&data_, size * sizeof(T)), cudaSuccess)
+          << "Failed to allocate " << size * sizeof(T)
+          << " bytes of GPU memory";
       size_ = size;
     }
   }
diff --git a/internal/ceres/cuda_kernels_test.cc b/internal/ceres/cuda_kernels_test.cc
index fbdc4b3..83b922d 100644
--- a/internal/ceres/cuda_kernels_test.cc
+++ b/internal/ceres/cuda_kernels_test.cc
@@ -52,10 +52,8 @@
   fp64_gpu.CopyFromCpuVector(fp64_cpu, cudaStreamDefault);
   CudaBuffer<float> fp32_gpu;
   fp32_gpu.Reserve(fp64_cpu.size());
-  CudaFP64ToFP32(fp64_gpu.data(),
-                 fp32_gpu.data(),
-                 fp64_cpu.size(),
-                 cudaStreamDefault);
+  CudaFP64ToFP32(
+      fp64_gpu.data(), fp32_gpu.data(), fp64_cpu.size(), cudaStreamDefault);
   std::vector<float> fp32_cpu(fp64_cpu.size());
   fp32_gpu.CopyToCpu(fp32_cpu.data(), fp32_cpu.size());
   for (int i = 0; i < fp32_cpu.size(); ++i) {
@@ -65,11 +63,7 @@
 
 TEST(CudaFP64ToFP32, NumericallyExtremeValues) {
   std::vector<double> fp64_cpu = {
-      DBL_MIN,
-      10.0 * DBL_MIN,
-      DBL_MAX,
-      0.1 * DBL_MAX
-  };
+      DBL_MIN, 10.0 * DBL_MIN, DBL_MAX, 0.1 * DBL_MAX};
   // First just make sure that the compiler has represented these values
   // accurately as fp64.
   EXPECT_GT(fp64_cpu[0], 0.0);
@@ -80,10 +74,8 @@
   fp64_gpu.CopyFromCpuVector(fp64_cpu, cudaStreamDefault);
   CudaBuffer<float> fp32_gpu;
   fp32_gpu.Reserve(fp64_cpu.size());
-  CudaFP64ToFP32(fp64_gpu.data(),
-                 fp32_gpu.data(),
-                 fp64_cpu.size(),
-                 cudaStreamDefault);
+  CudaFP64ToFP32(
+      fp64_gpu.data(), fp32_gpu.data(), fp64_cpu.size(), cudaStreamDefault);
   std::vector<float> fp32_cpu(fp64_cpu.size());
   fp32_gpu.CopyToCpu(fp32_cpu.data(), fp32_cpu.size());
   EXPECT_EQ(fp32_cpu[0], 0.0f);
@@ -98,10 +90,8 @@
   fp32_gpu.CopyFromCpuVector(fp32_cpu, cudaStreamDefault);
   CudaBuffer<double> fp64_gpu;
   fp64_gpu.Reserve(fp32_cpu.size());
-  CudaFP32ToFP64(fp32_gpu.data(),
-                 fp64_gpu.data(),
-                 fp32_cpu.size(),
-                 cudaStreamDefault);
+  CudaFP32ToFP64(
+      fp32_gpu.data(), fp64_gpu.data(), fp32_cpu.size(), cudaStreamDefault);
   std::vector<double> fp64_cpu(fp32_cpu.size());
   fp64_gpu.CopyToCpu(fp64_cpu.data(), fp64_cpu.size());
   for (int i = 0; i < fp64_cpu.size(); ++i) {
@@ -135,8 +125,8 @@
 
 TEST(CudaDsxpy, DoubleValues) {
   std::vector<float> fp32_cpu_a = {1.0, 1.5, 2.0, 2.5, 3.0, 3.5, 4.0, 4.5, 5.0};
-  std::vector<double> fp64_cpu_b =
-      {1.0, 1.5, 2.0, 2.5, 3.0, 3.5, 4.0, 4.5, 5.0};
+  std::vector<double> fp64_cpu_b = {
+      1.0, 1.5, 2.0, 2.5, 3.0, 3.5, 4.0, 4.5, 5.0};
   CudaBuffer<float> fp32_gpu_a;
   fp32_gpu_a.CopyFromCpuVector(fp32_cpu_a, cudaStreamDefault);
   CudaBuffer<double> fp64_gpu_b;
diff --git a/internal/ceres/dense_sparse_matrix.cc b/internal/ceres/dense_sparse_matrix.cc
index e71546b..67c3d2b 100644
--- a/internal/ceres/dense_sparse_matrix.cc
+++ b/internal/ceres/dense_sparse_matrix.cc
@@ -59,11 +59,13 @@
 
 void DenseSparseMatrix::SetZero() { m_.setZero(); }
 
-void DenseSparseMatrix::RightMultiply(const double* x, double* y) const {
+void DenseSparseMatrix::RightMultiplyAndAccumulate(const double* x,
+                                                   double* y) const {
   VectorRef(y, num_rows()).noalias() += m_ * ConstVectorRef(x, num_cols());
 }
 
-void DenseSparseMatrix::LeftMultiply(const double* x, double* y) const {
+void DenseSparseMatrix::LeftMultiplyAndAccumulate(const double* x,
+                                                  double* y) const {
   VectorRef(y, num_cols()).noalias() +=
       m_.transpose() * ConstVectorRef(x, num_rows());
 }
diff --git a/internal/ceres/dense_sparse_matrix.h b/internal/ceres/dense_sparse_matrix.h
index 160a591..5fc71c2 100644
--- a/internal/ceres/dense_sparse_matrix.h
+++ b/internal/ceres/dense_sparse_matrix.h
@@ -53,8 +53,8 @@
 
   // SparseMatrix interface.
   void SetZero() final;
-  void RightMultiply(const double* x, double* y) const final;
-  void LeftMultiply(const double* x, double* y) const final;
+  void RightMultiplyAndAccumulate(const double* x, double* y) const final;
+  void LeftMultiplyAndAccumulate(const double* x, double* y) const final;
   void SquaredColumnNorm(double* x) const final;
   void ScaleColumns(const double* scale) final;
   void ToDenseMatrix(Matrix* dense_matrix) const final;
diff --git a/internal/ceres/dense_sparse_matrix_test.cc b/internal/ceres/dense_sparse_matrix_test.cc
index 6bce0b4..0ba9fc2 100644
--- a/internal/ceres/dense_sparse_matrix_test.cc
+++ b/internal/ceres/dense_sparse_matrix_test.cc
@@ -59,8 +59,8 @@
     Vector y_a = Vector::Zero(num_rows);
     Vector y_b = Vector::Zero(num_rows);
 
-    a->RightMultiply(x.data(), y_a.data());
-    b->RightMultiply(x.data(), y_b.data());
+    a->RightMultiplyAndAccumulate(x.data(), y_a.data());
+    b->RightMultiplyAndAccumulate(x.data(), y_b.data());
 
     EXPECT_EQ((y_a - y_b).norm(), 0);
   }
@@ -88,7 +88,7 @@
   std::unique_ptr<DenseSparseMatrix> dsm;
 };
 
-TEST_F(DenseSparseMatrixTest, RightMultiply) {
+TEST_F(DenseSparseMatrixTest, RightMultiplyAndAccumulate) {
   CompareMatrices(tsm.get(), dsm.get());
 
   // Try with a not entirely zero vector to verify column interactions, which
@@ -100,13 +100,13 @@
   Vector b1 = Vector::Zero(num_rows);
   Vector b2 = Vector::Zero(num_rows);
 
-  tsm->RightMultiply(a.data(), b1.data());
-  dsm->RightMultiply(a.data(), b2.data());
+  tsm->RightMultiplyAndAccumulate(a.data(), b1.data());
+  dsm->RightMultiplyAndAccumulate(a.data(), b2.data());
 
   EXPECT_EQ((b1 - b2).norm(), 0);
 }
 
-TEST_F(DenseSparseMatrixTest, LeftMultiply) {
+TEST_F(DenseSparseMatrixTest, LeftMultiplyAndAccumulate) {
   for (int i = 0; i < num_rows; ++i) {
     Vector a = Vector::Zero(num_rows);
     a(i) = 1.0;
@@ -114,8 +114,8 @@
     Vector b1 = Vector::Zero(num_cols);
     Vector b2 = Vector::Zero(num_cols);
 
-    tsm->LeftMultiply(a.data(), b1.data());
-    dsm->LeftMultiply(a.data(), b2.data());
+    tsm->LeftMultiplyAndAccumulate(a.data(), b1.data());
+    dsm->LeftMultiplyAndAccumulate(a.data(), b2.data());
 
     EXPECT_EQ((b1 - b2).norm(), 0);
   }
@@ -129,8 +129,8 @@
   Vector b1 = Vector::Zero(num_cols);
   Vector b2 = Vector::Zero(num_cols);
 
-  tsm->LeftMultiply(a.data(), b1.data());
-  dsm->LeftMultiply(a.data(), b2.data());
+  tsm->LeftMultiplyAndAccumulate(a.data(), b1.data());
+  dsm->LeftMultiplyAndAccumulate(a.data(), b2.data());
 
   EXPECT_EQ((b1 - b2).norm(), 0);
 }
diff --git a/internal/ceres/dogleg_strategy.cc b/internal/ceres/dogleg_strategy.cc
index ac8c7d7..0db57de 100644
--- a/internal/ceres/dogleg_strategy.cc
+++ b/internal/ceres/dogleg_strategy.cc
@@ -175,7 +175,7 @@
 void DoglegStrategy::ComputeGradient(SparseMatrix* jacobian,
                                      const double* residuals) {
   gradient_.setZero();
-  jacobian->LeftMultiply(residuals, gradient_.data());
+  jacobian->LeftMultiplyAndAccumulate(residuals, gradient_.data());
   gradient_.array() /= diagonal_.array();
 }
 
@@ -188,7 +188,7 @@
   // The Jacobian is scaled implicitly by computing J * (D^-1 * (D^-1 * g))
   // instead of (J * D^-1) * (D^-1 * g).
   Vector scaled_gradient = (gradient_.array() / diagonal_.array()).matrix();
-  jacobian->RightMultiply(scaled_gradient.data(), Jg.data());
+  jacobian->RightMultiplyAndAccumulate(scaled_gradient.data(), Jg.data());
   alpha_ = gradient_.squaredNorm() / Jg.squaredNorm();
 }
 
@@ -706,9 +706,9 @@
 
   Vector tmp;
   tmp = (subspace_basis_.col(0).array() / diagonal_.array()).matrix();
-  jacobian->RightMultiply(tmp.data(), Jb.row(0).data());
+  jacobian->RightMultiplyAndAccumulate(tmp.data(), Jb.row(0).data());
   tmp = (subspace_basis_.col(1).array() / diagonal_.array()).matrix();
-  jacobian->RightMultiply(tmp.data(), Jb.row(1).data());
+  jacobian->RightMultiplyAndAccumulate(tmp.data(), Jb.row(1).data());
 
   subspace_B_ = Jb * Jb.transpose();
 
diff --git a/internal/ceres/dynamic_sparse_normal_cholesky_solver.cc b/internal/ceres/dynamic_sparse_normal_cholesky_solver.cc
index 992d48c..81cf933 100644
--- a/internal/ceres/dynamic_sparse_normal_cholesky_solver.cc
+++ b/internal/ceres/dynamic_sparse_normal_cholesky_solver.cc
@@ -64,7 +64,7 @@
     double* x) {
   const int num_cols = A->num_cols();
   VectorRef(x, num_cols).setZero();
-  A->LeftMultiply(b, x);
+  A->LeftMultiplyAndAccumulate(b, x);
 
   if (per_solve_options.D != nullptr) {
     // Temporarily append a diagonal block to the A matrix, but undo
diff --git a/internal/ceres/dynamic_sparse_normal_cholesky_solver_test.cc b/internal/ceres/dynamic_sparse_normal_cholesky_solver_test.cc
index f9ff443..8d66022 100644
--- a/internal/ceres/dynamic_sparse_normal_cholesky_solver_test.cc
+++ b/internal/ceres/dynamic_sparse_normal_cholesky_solver_test.cc
@@ -72,7 +72,7 @@
 
     Vector rhs(A_->num_cols());
     rhs.setZero();
-    A_->LeftMultiply(b_.get(), rhs.data());
+    A_->LeftMultiplyAndAccumulate(b_.get(), rhs.data());
     Vector expected_solution = lhs.llt().solve(rhs);
 
     std::unique_ptr<LinearSolver> solver(LinearSolver::Create(options));
diff --git a/internal/ceres/implicit_schur_complement.cc b/internal/ceres/implicit_schur_complement.cc
index 7946a56..751612f 100644
--- a/internal/ceres/implicit_schur_complement.cc
+++ b/internal/ceres/implicit_schur_complement.cc
@@ -96,23 +96,24 @@
 // By breaking it down into individual matrix vector products
 // involving the matrices E and F. This is implemented using a
 // PartitionedMatrixView of the input matrix A.
-void ImplicitSchurComplement::RightMultiply(const double* x, double* y) const {
+void ImplicitSchurComplement::RightMultiplyAndAccumulate(const double* x,
+                                                         double* y) const {
   // y1 = F x
   tmp_rows_.setZero();
-  A_->RightMultiplyF(x, tmp_rows_.data());
+  A_->RightMultiplyAndAccumulateF(x, tmp_rows_.data());
 
   // y2 = E' y1
   tmp_e_cols_.setZero();
-  A_->LeftMultiplyE(tmp_rows_.data(), tmp_e_cols_.data());
+  A_->LeftMultiplyAndAccumulateE(tmp_rows_.data(), tmp_e_cols_.data());
 
   // y3 = -(E'E)^-1 y2
   tmp_e_cols_2_.setZero();
-  block_diagonal_EtE_inverse_->RightMultiply(tmp_e_cols_.data(),
-                                             tmp_e_cols_2_.data());
+  block_diagonal_EtE_inverse_->RightMultiplyAndAccumulate(tmp_e_cols_.data(),
+                                                          tmp_e_cols_2_.data());
   tmp_e_cols_2_ *= -1.0;
 
   // y1 = y1 + E y3
-  A_->RightMultiplyE(tmp_e_cols_2_.data(), tmp_rows_.data());
+  A_->RightMultiplyAndAccumulateE(tmp_e_cols_2_.data(), tmp_rows_.data());
 
   // y5 = D * x
   if (D_ != nullptr) {
@@ -125,7 +126,7 @@
   }
 
   // y = y5 + F' y1
-  A_->LeftMultiplyF(tmp_rows_.data(), y);
+  A_->LeftMultiplyAndAccumulateF(tmp_rows_.data(), y);
 }
 
 // Given a block diagonal matrix and an optional array of diagonal
@@ -153,8 +154,8 @@
   }
 }
 
-// Similar to RightMultiply, use the block structure of the matrix A
-// to compute y = (E'E)^-1 (E'b - E'F x).
+// Similar to RightMultiplyAndAccumulate, use the block structure of the matrix
+// A to compute y = (E'E)^-1 (E'b - E'F x).
 void ImplicitSchurComplement::BackSubstitute(const double* x, double* y) {
   const int num_cols_e = A_->num_cols_e();
   const int num_cols_f = A_->num_cols_f();
@@ -163,18 +164,19 @@
 
   // y1 = F x
   tmp_rows_.setZero();
-  A_->RightMultiplyF(x, tmp_rows_.data());
+  A_->RightMultiplyAndAccumulateF(x, tmp_rows_.data());
 
   // y2 = b - y1
   tmp_rows_ = ConstVectorRef(b_, num_rows) - tmp_rows_;
 
   // y3 = E' y2
   tmp_e_cols_.setZero();
-  A_->LeftMultiplyE(tmp_rows_.data(), tmp_e_cols_.data());
+  A_->LeftMultiplyAndAccumulateE(tmp_rows_.data(), tmp_e_cols_.data());
 
   // y = (E'E)^-1 y3
   VectorRef(y, num_cols).setZero();
-  block_diagonal_EtE_inverse_->RightMultiply(tmp_e_cols_.data(), y);
+  block_diagonal_EtE_inverse_->RightMultiplyAndAccumulate(tmp_e_cols_.data(),
+                                                          y);
 
   // The full solution vector y has two blocks. The first block of
   // variables corresponds to the eliminated variables, which we just
@@ -193,22 +195,23 @@
 void ImplicitSchurComplement::UpdateRhs() {
   // y1 = E'b
   tmp_e_cols_.setZero();
-  A_->LeftMultiplyE(b_, tmp_e_cols_.data());
+  A_->LeftMultiplyAndAccumulateE(b_, tmp_e_cols_.data());
 
   // y2 = (E'E)^-1 y1
   Vector y2 = Vector::Zero(A_->num_cols_e());
-  block_diagonal_EtE_inverse_->RightMultiply(tmp_e_cols_.data(), y2.data());
+  block_diagonal_EtE_inverse_->RightMultiplyAndAccumulate(tmp_e_cols_.data(),
+                                                          y2.data());
 
   // y3 = E y2
   tmp_rows_.setZero();
-  A_->RightMultiplyE(y2.data(), tmp_rows_.data());
+  A_->RightMultiplyAndAccumulateE(y2.data(), tmp_rows_.data());
 
   // y3 = b - y3
   tmp_rows_ = ConstVectorRef(b_, A_->num_rows()) - tmp_rows_;
 
   // rhs = F' y3
   rhs_.setZero();
-  A_->LeftMultiplyF(tmp_rows_.data(), rhs_.data());
+  A_->LeftMultiplyAndAccumulateF(tmp_rows_.data(), rhs_.data());
 }
 
 }  // namespace ceres::internal
diff --git a/internal/ceres/implicit_schur_complement.h b/internal/ceres/implicit_schur_complement.h
index 75e05a4..8fcc309 100644
--- a/internal/ceres/implicit_schur_complement.h
+++ b/internal/ceres/implicit_schur_complement.h
@@ -85,9 +85,9 @@
 // complement using the PartitionedMatrixView object.
 //
 // THREAD SAFETY: This class is not thread safe. In particular, the
-// RightMultiply (and the LeftMultiply) methods are not thread safe as
-// they depend on mutable arrays used for the temporaries needed to
-// compute the product y += Sx;
+// RightMultiplyAndAccumulate (and the LeftMultiplyAndAccumulate) methods are
+// not thread safe as they depend on mutable arrays used for the temporaries
+// needed to compute the product y += Sx.
 class CERES_NO_EXPORT ImplicitSchurComplement final : public LinearOperator {
  public:
   // num_eliminate_blocks is the number of E blocks in the matrix
@@ -114,12 +114,12 @@
   void Init(const BlockSparseMatrix& A, const double* D, const double* b);
 
   // y += Sx, where S is the Schur complement.
-  void RightMultiply(const double* x, double* y) const final;
+  void RightMultiplyAndAccumulate(const double* x, double* y) const final;
 
   // The Schur complement is a symmetric positive definite matrix,
   // thus the left and right multiply operators are the same.
-  void LeftMultiply(const double* x, double* y) const final {
-    RightMultiply(x, y);
+  void LeftMultiplyAndAccumulate(const double* x, double* y) const final {
+    RightMultiplyAndAccumulate(x, y);
   }
 
   // y = (E'E)^-1 (E'b - E'F x). Given an estimate of the solution to
@@ -155,7 +155,7 @@
 
   Vector rhs_;
 
-  // Temporary storage vectors used to implement RightMultiply.
+  // Temporary storage vectors used to implement RightMultiplyAndAccumulate.
   mutable Vector tmp_rows_;
   mutable Vector tmp_e_cols_;
   mutable Vector tmp_e_cols_2_;
diff --git a/internal/ceres/implicit_schur_complement_test.cc b/internal/ceres/implicit_schur_complement_test.cc
index baa381a..0ebde31 100644
--- a/internal/ceres/implicit_schur_complement_test.cc
+++ b/internal/ceres/implicit_schur_complement_test.cc
@@ -147,7 +147,7 @@
       y = lhs * x;
 
       Vector z(num_sc_cols);
-      isc.RightMultiply(x.data(), z.data());
+      isc.RightMultiplyAndAccumulate(x.data(), z.data());
 
       // The i^th column of the implicit schur complement is the same as
       // the explicit schur complement.
diff --git a/internal/ceres/iterative_refiner.cc b/internal/ceres/iterative_refiner.cc
index 90ff511..aaeefa3 100644
--- a/internal/ceres/iterative_refiner.cc
+++ b/internal/ceres/iterative_refiner.cc
@@ -62,7 +62,7 @@
   for (int i = 0; i < max_num_iterations_; ++i) {
     // residual = rhs - lhs * solution
     lhs_x_solution_.setZero();
-    lhs.RightMultiply(solution_ptr, lhs_x_solution_.data());
+    lhs.RightMultiplyAndAccumulate(solution_ptr, lhs_x_solution_.data());
     residual_ = rhs - lhs_x_solution_;
     // solution += lhs^-1 residual
     cholesky->Solve(residual_.data(), correction_.data(), &ignored_message);
diff --git a/internal/ceres/iterative_refiner_test.cc b/internal/ceres/iterative_refiner_test.cc
index 9ea2340..49e379e 100644
--- a/internal/ceres/iterative_refiner_test.cc
+++ b/internal/ceres/iterative_refiner_test.cc
@@ -58,13 +58,13 @@
   explicit FakeSparseMatrix(Matrix m) : m_(std::move(m)) {}
 
   // y += Ax
-  void RightMultiply(const double* x, double* y) const final {
+  void RightMultiplyAndAccumulate(const double* x, double* y) const final {
     VectorRef(y, m_.cols()) += m_ * ConstVectorRef(x, m_.cols());
   }
   // y += A'x
-  void LeftMultiply(const double* x, double* y) const final {
+  void LeftMultiplyAndAccumulate(const double* x, double* y) const final {
     // We will assume that this is a symmetric matrix.
-    RightMultiply(x, y);
+    RightMultiplyAndAccumulate(x, y);
   }
 
   double* mutable_values() final { return m_.data(); }
diff --git a/internal/ceres/line_search_direction.cc b/internal/ceres/line_search_direction.cc
index e93d3e9..f14292f 100644
--- a/internal/ceres/line_search_direction.cc
+++ b/internal/ceres/line_search_direction.cc
@@ -120,8 +120,8 @@
         current.gradient - previous.gradient);
 
     search_direction->setZero();
-    low_rank_inverse_hessian_.RightMultiply(current.gradient.data(),
-                                            search_direction->data());
+    low_rank_inverse_hessian_.RightMultiplyAndAccumulate(
+        current.gradient.data(), search_direction->data());
     *search_direction *= -1.0;
 
     if (search_direction->dot(current.gradient) >= 0.0) {
diff --git a/internal/ceres/linear_operator.h b/internal/ceres/linear_operator.h
index cab87e7..8a1e902 100644
--- a/internal/ceres/linear_operator.h
+++ b/internal/ceres/linear_operator.h
@@ -46,16 +46,16 @@
   virtual ~LinearOperator();
 
   // y = y + Ax;
-  virtual void RightMultiply(const double* x, double* y) const = 0;
+  virtual void RightMultiplyAndAccumulate(const double* x, double* y) const = 0;
   // y = y + A'x;
-  virtual void LeftMultiply(const double* x, double* y) const = 0;
+  virtual void LeftMultiplyAndAccumulate(const double* x, double* y) const = 0;
 
-  virtual void RightMultiply(const Vector& x, Vector& y) const {
-    RightMultiply(x.data(), y.data());
+  virtual void RightMultiplyAndAccumulate(const Vector& x, Vector& y) const {
+    RightMultiplyAndAccumulate(x.data(), y.data());
   }
 
-  virtual void LeftMultiply(const Vector& x, Vector& y) const {
-    LeftMultiply(x.data(), y.data());
+  virtual void LeftMultiplyAndAccumulate(const Vector& x, Vector& y) const {
+    LeftMultiplyAndAccumulate(x.data(), y.data());
   }
 
   virtual int num_rows() const = 0;
diff --git a/internal/ceres/low_rank_inverse_hessian.cc b/internal/ceres/low_rank_inverse_hessian.cc
index 42827e2..c471844 100644
--- a/internal/ceres/low_rank_inverse_hessian.cc
+++ b/internal/ceres/low_rank_inverse_hessian.cc
@@ -116,8 +116,8 @@
   return true;
 }
 
-void LowRankInverseHessian::RightMultiply(const double* x_ptr,
-                                          double* y_ptr) const {
+void LowRankInverseHessian::RightMultiplyAndAccumulate(const double* x_ptr,
+                                                       double* y_ptr) const {
   ConstVectorRef gradient(x_ptr, num_parameters_);
   VectorRef search_direction(y_ptr, num_parameters_);
 
diff --git a/internal/ceres/low_rank_inverse_hessian.h b/internal/ceres/low_rank_inverse_hessian.h
index de30f54..878db81 100644
--- a/internal/ceres/low_rank_inverse_hessian.h
+++ b/internal/ceres/low_rank_inverse_hessian.h
@@ -64,7 +64,7 @@
   // num_parameters is the row/column size of the Hessian.
   // max_num_corrections is the rank of the Hessian approximation.
   // use_approximate_eigenvalue_scaling controls whether the initial
-  // inverse Hessian used during Right/LeftMultiply() is scaled by
+  // inverse Hessian used during Right/LeftMultiplyAndAccumulate() is scaled by
   // the approximate eigenvalue of the true inverse Hessian at the
   // current operating point.
   // The approximation uses:
@@ -83,9 +83,9 @@
   bool Update(const Vector& delta_x, const Vector& delta_gradient);
 
   // LinearOperator interface
-  void RightMultiply(const double* x, double* y) const final;
-  void LeftMultiply(const double* x, double* y) const final {
-    RightMultiply(x, y);
+  void RightMultiplyAndAccumulate(const double* x, double* y) const final;
+  void LeftMultiplyAndAccumulate(const double* x, double* y) const final {
+    RightMultiplyAndAccumulate(x, y);
   }
   int num_rows() const final { return num_parameters_; }
   int num_cols() const final { return num_parameters_; }
diff --git a/internal/ceres/partitioned_matrix_view.h b/internal/ceres/partitioned_matrix_view.h
index 1305720..7fb1a09 100644
--- a/internal/ceres/partitioned_matrix_view.h
+++ b/internal/ceres/partitioned_matrix_view.h
@@ -67,16 +67,18 @@
   virtual ~PartitionedMatrixViewBase();
 
   // y += E'x
-  virtual void LeftMultiplyE(const double* x, double* y) const = 0;
+  virtual void LeftMultiplyAndAccumulateE(const double* x, double* y) const = 0;
 
   // y += F'x
-  virtual void LeftMultiplyF(const double* x, double* y) const = 0;
+  virtual void LeftMultiplyAndAccumulateF(const double* x, double* y) const = 0;
 
   // y += Ex
-  virtual void RightMultiplyE(const double* x, double* y) const = 0;
+  virtual void RightMultiplyAndAccumulateE(const double* x,
+                                           double* y) const = 0;
 
   // y += Fx
-  virtual void RightMultiplyF(const double* x, double* y) const = 0;
+  virtual void RightMultiplyAndAccumulateF(const double* x,
+                                           double* y) const = 0;
 
   // Create and return the block diagonal of the matrix E'E.
   virtual std::unique_ptr<BlockSparseMatrix> CreateBlockDiagonalEtE() const = 0;
@@ -124,10 +126,10 @@
   // num_col_blocks_a column blocks.
   PartitionedMatrixView(const BlockSparseMatrix& matrix, int num_col_blocks_e);
 
-  void LeftMultiplyE(const double* x, double* y) const final;
-  void LeftMultiplyF(const double* x, double* y) const final;
-  void RightMultiplyE(const double* x, double* y) const final;
-  void RightMultiplyF(const double* x, double* y) const final;
+  void LeftMultiplyAndAccumulateE(const double* x, double* y) const final;
+  void LeftMultiplyAndAccumulateF(const double* x, double* y) const final;
+  void RightMultiplyAndAccumulateE(const double* x, double* y) const final;
+  void RightMultiplyAndAccumulateF(const double* x, double* y) const final;
   std::unique_ptr<BlockSparseMatrix> CreateBlockDiagonalEtE() const final;
   std::unique_ptr<BlockSparseMatrix> CreateBlockDiagonalFtF() const final;
   void UpdateBlockDiagonalEtE(BlockSparseMatrix* block_diagonal) const final;
diff --git a/internal/ceres/partitioned_matrix_view_impl.h b/internal/ceres/partitioned_matrix_view_impl.h
index a33b86d..2150660 100644
--- a/internal/ceres/partitioned_matrix_view_impl.h
+++ b/internal/ceres/partitioned_matrix_view_impl.h
@@ -87,7 +87,7 @@
 
 template <int kRowBlockSize, int kEBlockSize, int kFBlockSize>
 void PartitionedMatrixView<kRowBlockSize, kEBlockSize, kFBlockSize>::
-    RightMultiplyE(const double* x, double* y) const {
+    RightMultiplyAndAccumulateE(const double* x, double* y) const {
   const CompressedRowBlockStructure* bs = matrix_.block_structure();
 
   // Iterate over the first num_row_blocks_e_ row blocks, and multiply
@@ -111,7 +111,7 @@
 
 template <int kRowBlockSize, int kEBlockSize, int kFBlockSize>
 void PartitionedMatrixView<kRowBlockSize, kEBlockSize, kFBlockSize>::
-    RightMultiplyF(const double* x, double* y) const {
+    RightMultiplyAndAccumulateF(const double* x, double* y) const {
   const CompressedRowBlockStructure* bs = matrix_.block_structure();
 
   // Iterate over row blocks, and if the row block is in E, then
@@ -157,7 +157,7 @@
 
 template <int kRowBlockSize, int kEBlockSize, int kFBlockSize>
 void PartitionedMatrixView<kRowBlockSize, kEBlockSize, kFBlockSize>::
-    LeftMultiplyE(const double* x, double* y) const {
+    LeftMultiplyAndAccumulateE(const double* x, double* y) const {
   const CompressedRowBlockStructure* bs = matrix_.block_structure();
 
   // Iterate over the first num_row_blocks_e_ row blocks, and multiply
@@ -181,7 +181,7 @@
 
 template <int kRowBlockSize, int kEBlockSize, int kFBlockSize>
 void PartitionedMatrixView<kRowBlockSize, kEBlockSize, kFBlockSize>::
-    LeftMultiplyF(const double* x, double* y) const {
+    LeftMultiplyAndAccumulateF(const double* x, double* y) const {
   const CompressedRowBlockStructure* bs = matrix_.block_structure();
 
   // Iterate over row blocks, and if the row block is in E, then
@@ -289,8 +289,8 @@
   return block_diagonal;
 }
 
-// Similar to the code in RightMultiplyE, except instead of the matrix
-// vector multiply its an outer product.
+// Similar to the code in RightMultiplyAndAccumulateE, except instead of the
+// matrix-vector multiply it's an outer product.
 //
 //    block_diagonal = block_diagonal(E'E)
 //
@@ -322,8 +322,8 @@
   }
 }
 
-// Similar to the code in RightMultiplyF, except instead of the matrix
-// vector multiply its an outer product.
+// Similar to the code in RightMultiplyAndAccumulateF, except instead of the
+// matrix-vector multiply it's an outer product.
 //
 //   block_diagonal = block_diagonal(F'F)
 //
diff --git a/internal/ceres/partitioned_matrix_view_test.cc b/internal/ceres/partitioned_matrix_view_test.cc
index e43e32f..cb3dd14 100644
--- a/internal/ceres/partitioned_matrix_view_test.cc
+++ b/internal/ceres/partitioned_matrix_view_test.cc
@@ -85,7 +85,7 @@
   EXPECT_EQ(pmv_->num_rows(), A_->num_rows());
 }
 
-TEST_F(PartitionedMatrixViewTest, RightMultiplyE) {
+TEST_F(PartitionedMatrixViewTest, RightMultiplyAndAccumulateE) {
   Vector x1(pmv_->num_cols_e());
   Vector x2(pmv_->num_cols());
   x2.setZero();
@@ -95,17 +95,17 @@
   }
 
   Vector y1 = Vector::Zero(pmv_->num_rows());
-  pmv_->RightMultiplyE(x1.data(), y1.data());
+  pmv_->RightMultiplyAndAccumulateE(x1.data(), y1.data());
 
   Vector y2 = Vector::Zero(pmv_->num_rows());
-  A_->RightMultiply(x2.data(), y2.data());
+  A_->RightMultiplyAndAccumulate(x2.data(), y2.data());
 
   for (int i = 0; i < pmv_->num_rows(); ++i) {
     EXPECT_NEAR(y1(i), y2(i), kEpsilon);
   }
 }
 
-TEST_F(PartitionedMatrixViewTest, RightMultiplyF) {
+TEST_F(PartitionedMatrixViewTest, RightMultiplyAndAccumulateF) {
   Vector x1(pmv_->num_cols_f());
   Vector x2 = Vector::Zero(pmv_->num_cols());
 
@@ -115,17 +115,17 @@
   }
 
   Vector y1 = Vector::Zero(pmv_->num_rows());
-  pmv_->RightMultiplyF(x1.data(), y1.data());
+  pmv_->RightMultiplyAndAccumulateF(x1.data(), y1.data());
 
   Vector y2 = Vector::Zero(pmv_->num_rows());
-  A_->RightMultiply(x2.data(), y2.data());
+  A_->RightMultiplyAndAccumulate(x2.data(), y2.data());
 
   for (int i = 0; i < pmv_->num_rows(); ++i) {
     EXPECT_NEAR(y1(i), y2(i), kEpsilon);
   }
 }
 
-TEST_F(PartitionedMatrixViewTest, LeftMultiply) {
+TEST_F(PartitionedMatrixViewTest, LeftMultiplyAndAccumulate) {
   Vector x = Vector::Zero(pmv_->num_rows());
   for (int i = 0; i < pmv_->num_rows(); ++i) {
     x(i) = RandDouble();
@@ -135,9 +135,9 @@
   Vector y1 = Vector::Zero(pmv_->num_cols_e());
   Vector y2 = Vector::Zero(pmv_->num_cols_f());
 
-  A_->LeftMultiply(x.data(), y.data());
-  pmv_->LeftMultiplyE(x.data(), y1.data());
-  pmv_->LeftMultiplyF(x.data(), y2.data());
+  A_->LeftMultiplyAndAccumulate(x.data(), y.data());
+  pmv_->LeftMultiplyAndAccumulateE(x.data(), y1.data());
+  pmv_->LeftMultiplyAndAccumulateF(x.data(), y2.data());
 
   for (int i = 0; i < pmv_->num_cols(); ++i) {
     EXPECT_NEAR(y(i),
diff --git a/internal/ceres/preconditioner.cc b/internal/ceres/preconditioner.cc
index 391e1b5..33174de 100644
--- a/internal/ceres/preconditioner.cc
+++ b/internal/ceres/preconditioner.cc
@@ -60,9 +60,9 @@
   return true;
 }
 
-void SparseMatrixPreconditionerWrapper::RightMultiply(const double* x,
-                                                      double* y) const {
-  matrix_->RightMultiply(x, y);
+void SparseMatrixPreconditionerWrapper::RightMultiplyAndAccumulate(
+    const double* x, double* y) const {
+  matrix_->RightMultiplyAndAccumulate(x, y);
 }
 
 int SparseMatrixPreconditionerWrapper::num_rows() const {
diff --git a/internal/ceres/preconditioner.h b/internal/ceres/preconditioner.h
index 75613fb..2d343bd 100644
--- a/internal/ceres/preconditioner.h
+++ b/internal/ceres/preconditioner.h
@@ -130,12 +130,13 @@
   virtual bool Update(const LinearOperator& A, const double* D) = 0;
 
   // LinearOperator interface. Since the operator is symmetric,
-  // LeftMultiply and num_cols are just calls to RightMultiply and
-  // num_rows respectively. Update() must be called before
-  // RightMultiply can be called.
-  void RightMultiply(const double* x, double* y) const override = 0;
-  void LeftMultiply(const double* x, double* y) const override {
-    return RightMultiply(x, y);
+  // LeftMultiplyAndAccumulate and num_cols are just calls to
+  // RightMultiplyAndAccumulate and num_rows respectively. Update() must be
+  // called before RightMultiplyAndAccumulate can be called.
+  void RightMultiplyAndAccumulate(const double* x,
+                                  double* y) const override = 0;
+  void LeftMultiplyAndAccumulate(const double* x, double* y) const override {
+    return RightMultiplyAndAccumulate(x, y);
   }
 
   int num_rows() const override = 0;
@@ -148,7 +149,7 @@
 
   bool Update(const LinearOperator& A, const double* D) final { return true; }
 
-  void RightMultiply(const double* x, double* y) const final {
+  void RightMultiplyAndAccumulate(const double* x, double* y) const final {
     VectorRef(y, num_rows_) += ConstVectorRef(x, num_rows_);
   }
 
@@ -189,7 +190,7 @@
   ~SparseMatrixPreconditionerWrapper() override;
 
   // Preconditioner interface
-  void RightMultiply(const double* x, double* y) const override;
+  void RightMultiplyAndAccumulate(const double* x, double* y) const override;
   int num_rows() const override;
 
  private:
diff --git a/internal/ceres/schur_complement_solver.cc b/internal/ceres/schur_complement_solver.cc
index 4e47837..1f10ac2 100644
--- a/internal/ceres/schur_complement_solver.cc
+++ b/internal/ceres/schur_complement_solver.cc
@@ -70,8 +70,8 @@
 
   virtual ~BlockRandomAccessSparseMatrixAdapter() final {}
 
-  void RightMultiply(const Vector& x, Vector& y) final {
-    m_.SymmetricRightMultiply(x.data(), y.data());
+  void RightMultiplyAndAccumulate(const Vector& x, Vector& y) final {
+    m_.SymmetricRightMultiplyAndAccumulate(x.data(), y.data());
   }
 
  private:
@@ -88,8 +88,8 @@
   virtual ~BlockRandomAccessDiagonalMatrixAdapter() final {}
 
   // y = y + Ax;
-  void RightMultiply(const Vector& x, Vector& y) final {
-    m_.RightMultiply(x.data(), y.data());
+  void RightMultiplyAndAccumulate(const Vector& x, Vector& y) final {
+    m_.RightMultiplyAndAccumulate(x.data(), y.data());
   }
 
  private:
diff --git a/internal/ceres/schur_jacobi_preconditioner.cc b/internal/ceres/schur_jacobi_preconditioner.cc
index d452ba4..3317927 100644
--- a/internal/ceres/schur_jacobi_preconditioner.cc
+++ b/internal/ceres/schur_jacobi_preconditioner.cc
@@ -91,9 +91,9 @@
   return true;
 }
 
-void SchurJacobiPreconditioner::RightMultiply(const double* x,
-                                              double* y) const {
-  m_->RightMultiply(x, y);
+void SchurJacobiPreconditioner::RightMultiplyAndAccumulate(const double* x,
+                                                           double* y) const {
+  m_->RightMultiplyAndAccumulate(x, y);
 }
 
 int SchurJacobiPreconditioner::num_rows() const { return m_->num_rows(); }
diff --git a/internal/ceres/schur_jacobi_preconditioner.h b/internal/ceres/schur_jacobi_preconditioner.h
index 76a7b3d..ddf471c 100644
--- a/internal/ceres/schur_jacobi_preconditioner.h
+++ b/internal/ceres/schur_jacobi_preconditioner.h
@@ -71,7 +71,7 @@
 //   SchurJacobiPreconditioner preconditioner(
 //      *A.block_structure(), options);
 //   preconditioner.Update(A, nullptr);
-//   preconditioner.RightMultiply(x, y);
+//   preconditioner.RightMultiplyAndAccumulate(x, y);
 //
 class CERES_NO_EXPORT SchurJacobiPreconditioner
     : public BlockSparseMatrixPreconditioner {
@@ -90,7 +90,7 @@
   ~SchurJacobiPreconditioner() override;
 
   // Preconditioner interface.
-  void RightMultiply(const double* x, double* y) const final;
+  void RightMultiplyAndAccumulate(const double* x, double* y) const final;
   int num_rows() const final;
 
  private:
diff --git a/internal/ceres/sparse_matrix.h b/internal/ceres/sparse_matrix.h
index 9fe33a3..da6af18 100644
--- a/internal/ceres/sparse_matrix.h
+++ b/internal/ceres/sparse_matrix.h
@@ -68,9 +68,10 @@
   ~SparseMatrix() override;
 
   // y += Ax;
-  void RightMultiply(const double* x, double* y) const override = 0;
+  void RightMultiplyAndAccumulate(const double* x,
+                                  double* y) const override = 0;
   // y += A'x;
-  void LeftMultiply(const double* x, double* y) const override = 0;
+  void LeftMultiplyAndAccumulate(const double* x, double* y) const override = 0;
 
   // In MATLAB notation sum(A.*A, 1)
   virtual void SquaredColumnNorm(double* x) const = 0;
diff --git a/internal/ceres/sparse_normal_cholesky_solver.cc b/internal/ceres/sparse_normal_cholesky_solver.cc
index 949991b..99205fa 100644
--- a/internal/ceres/sparse_normal_cholesky_solver.cc
+++ b/internal/ceres/sparse_normal_cholesky_solver.cc
@@ -71,7 +71,7 @@
   xref.setZero();
   rhs_.resize(num_cols);
   rhs_.setZero();
-  A->LeftMultiply(b, rhs_.data());
+  A->LeftMultiplyAndAccumulate(b, rhs_.data());
   event_logger.AddEvent("Compute RHS");
 
   if (per_solve_options.D != nullptr) {
diff --git a/internal/ceres/sparse_normal_cholesky_solver_test.cc b/internal/ceres/sparse_normal_cholesky_solver_test.cc
index b7d4a39..002b707 100644
--- a/internal/ceres/sparse_normal_cholesky_solver_test.cc
+++ b/internal/ceres/sparse_normal_cholesky_solver_test.cc
@@ -75,7 +75,7 @@
 
     Vector rhs(A_->num_cols());
     rhs.setZero();
-    A_->LeftMultiply(b_.get(), rhs.data());
+    A_->LeftMultiplyAndAccumulate(b_.get(), rhs.data());
     Vector expected_solution = lhs.llt().solve(rhs);
 
     std::unique_ptr<LinearSolver> solver(LinearSolver::Create(options));
diff --git a/internal/ceres/subset_preconditioner.cc b/internal/ceres/subset_preconditioner.cc
index a228545..5dc364f 100644
--- a/internal/ceres/subset_preconditioner.cc
+++ b/internal/ceres/subset_preconditioner.cc
@@ -57,7 +57,8 @@
 
 SubsetPreconditioner::~SubsetPreconditioner() = default;
 
-void SubsetPreconditioner::RightMultiply(const double* x, double* y) const {
+void SubsetPreconditioner::RightMultiplyAndAccumulate(const double* x,
+                                                      double* y) const {
   CHECK(x != nullptr);
   CHECK(y != nullptr);
   std::string message;
diff --git a/internal/ceres/subset_preconditioner.h b/internal/ceres/subset_preconditioner.h
index 1f1c6ec..7139ca6 100644
--- a/internal/ceres/subset_preconditioner.h
+++ b/internal/ceres/subset_preconditioner.h
@@ -75,7 +75,7 @@
   ~SubsetPreconditioner() override;
 
   // Preconditioner interface
-  void RightMultiply(const double* x, double* y) const final;
+  void RightMultiplyAndAccumulate(const double* x, double* y) const final;
   int num_rows() const final { return num_cols_; }
   int num_cols() const final { return num_cols_; }
 
diff --git a/internal/ceres/subset_preconditioner_test.cc b/internal/ceres/subset_preconditioner_test.cc
index 2f5e044..cd86695 100644
--- a/internal/ceres/subset_preconditioner_test.cc
+++ b/internal/ceres/subset_preconditioner_test.cc
@@ -159,7 +159,7 @@
     EXPECT_TRUE(ComputeExpectedSolution(*lhs, rhs, &expected));
 
     Vector actual(lhs->num_rows());
-    preconditioner_->RightMultiply(rhs.data(), actual.data());
+    preconditioner_->RightMultiplyAndAccumulate(rhs.data(), actual.data());
 
     Matrix eigen_lhs;
     lhs->ToDenseMatrix(&eigen_lhs);
diff --git a/internal/ceres/triplet_sparse_matrix.cc b/internal/ceres/triplet_sparse_matrix.cc
index b8f43ad..49d367a 100644
--- a/internal/ceres/triplet_sparse_matrix.cc
+++ b/internal/ceres/triplet_sparse_matrix.cc
@@ -169,13 +169,15 @@
   }
 }
 
-void TripletSparseMatrix::RightMultiply(const double* x, double* y) const {
+void TripletSparseMatrix::RightMultiplyAndAccumulate(const double* x,
+                                                     double* y) const {
   for (int i = 0; i < num_nonzeros_; ++i) {
     y[rows_[i]] += values_[i] * x[cols_[i]];
   }
 }
 
-void TripletSparseMatrix::LeftMultiply(const double* x, double* y) const {
+void TripletSparseMatrix::LeftMultiplyAndAccumulate(const double* x,
+                                                    double* y) const {
   for (int i = 0; i < num_nonzeros_; ++i) {
     y[cols_[i]] += values_[i] * x[rows_[i]];
   }
diff --git a/internal/ceres/triplet_sparse_matrix.h b/internal/ceres/triplet_sparse_matrix.h
index a2ba1f1..c962455 100644
--- a/internal/ceres/triplet_sparse_matrix.h
+++ b/internal/ceres/triplet_sparse_matrix.h
@@ -65,8 +65,8 @@
 
   // Implementation of the SparseMatrix interface.
   void SetZero() final;
-  void RightMultiply(const double* x, double* y) const final;
-  void LeftMultiply(const double* x, double* y) const final;
+  void RightMultiplyAndAccumulate(const double* x, double* y) const final;
+  void LeftMultiplyAndAccumulate(const double* x, double* y) const final;
   void SquaredColumnNorm(double* x) const final;
   void ScaleColumns(const double* scale) final;
   void ToCRSMatrix(CRSMatrix* matrix) const;
@@ -139,6 +139,7 @@
 
   // Load a triplet sparse matrix from a text file.
   static std::unique_ptr<TripletSparseMatrix> CreateFromTextFile(FILE* file);
+
  private:
   void AllocateMemory();
   void CopyData(const TripletSparseMatrix& orig);
diff --git a/internal/ceres/triplet_sparse_matrix_test.cc b/internal/ceres/triplet_sparse_matrix_test.cc
index 577d959..b41d991 100644
--- a/internal/ceres/triplet_sparse_matrix_test.cc
+++ b/internal/ceres/triplet_sparse_matrix_test.cc
@@ -28,11 +28,11 @@
 //
 // Author: sameeragarwal@google.com (Sameer Agarwal)
 
-#include "ceres/crs_matrix.h"
 #include "ceres/triplet_sparse_matrix.h"
 
 #include <memory>
 
+#include "ceres/crs_matrix.h"
 #include "gtest/gtest.h"
 
 namespace ceres::internal {
diff --git a/internal/ceres/trust_region_minimizer.cc b/internal/ceres/trust_region_minimizer.cc
index 739304a..6693e6e 100644
--- a/internal/ceres/trust_region_minimizer.cc
+++ b/internal/ceres/trust_region_minimizer.cc
@@ -421,7 +421,8 @@
   //  = -f'J * step - step' * J' * J * step / 2
   //  = -(J * step)'(f + J * step / 2)
   model_residuals_.setZero();
-  jacobian_->RightMultiply(trust_region_step_.data(), model_residuals_.data());
+  jacobian_->RightMultiplyAndAccumulate(trust_region_step_.data(),
+                                        model_residuals_.data());
   model_cost_change_ =
       -model_residuals_.dot(residuals_ + model_residuals_ / 2.0);
 
diff --git a/internal/ceres/visibility_based_preconditioner.cc b/internal/ceres/visibility_based_preconditioner.cc
index 0a7b19f..bdbe3c4 100644
--- a/internal/ceres/visibility_based_preconditioner.cc
+++ b/internal/ceres/visibility_based_preconditioner.cc
@@ -422,8 +422,8 @@
   return sparse_cholesky_->Factorize(lhs.get(), &message);
 }
 
-void VisibilityBasedPreconditioner::RightMultiply(const double* x,
-                                                  double* y) const {
+void VisibilityBasedPreconditioner::RightMultiplyAndAccumulate(
+    const double* x, double* y) const {
   CHECK(x != nullptr);
   CHECK(y != nullptr);
   CHECK(sparse_cholesky_ != nullptr);
diff --git a/internal/ceres/visibility_based_preconditioner.h b/internal/ceres/visibility_based_preconditioner.h
index 39bdc9f..f27eb9e 100644
--- a/internal/ceres/visibility_based_preconditioner.h
+++ b/internal/ceres/visibility_based_preconditioner.h
@@ -122,7 +122,7 @@
 //   VisibilityBasedPreconditioner preconditioner(
 //      *A.block_structure(), options);
 //   preconditioner.Update(A, nullptr);
-//   preconditioner.RightMultiply(x, y);
+//   preconditioner.RightMultiplyAndAccumulate(x, y);
 class CERES_NO_EXPORT VisibilityBasedPreconditioner
     : public BlockSparseMatrixPreconditioner {
  public:
@@ -140,7 +140,7 @@
   ~VisibilityBasedPreconditioner() override;
 
   // Preconditioner interface
-  void RightMultiply(const double* x, double* y) const final;
+  void RightMultiplyAndAccumulate(const double* x, double* y) const final;
   int num_rows() const final;
 
   friend class VisibilityBasedPreconditionerTest;
diff --git a/internal/ceres/visibility_based_preconditioner_test.cc b/internal/ceres/visibility_based_preconditioner_test.cc
index 8e0d5fd..4cf1dbe 100644
--- a/internal/ceres/visibility_based_preconditioner_test.cc
+++ b/internal/ceres/visibility_based_preconditioner_test.cc
@@ -276,7 +276,7 @@
 //     y.setZero();
 //     z.setZero();
 //     x[i] = 1.0;
-//     preconditioner_->RightMultiply(x.data(), y.data());
+//     preconditioner_->RightMultiplyAndAccumulate(x.data(), y.data());
 //     z = full_schur_complement
 //         .selfadjointView<Eigen::Upper>()
 //         .llt().solve(x);