Convert internal enums to be class enums.
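
Unscoped enums such as LinearSolverTerminationType and
CompressedRowSparseMatrix::StorageType leak their enumerators into the
enclosing scope and convert implicitly to int. Scoping them forces
qualified names at every use site (e.g. LINEAR_SOLVER_SUCCESS becomes
LinearSolverTerminationType::SUCCESS) and requires an explicit
operator<< where values are streamed into logs. A minimal sketch of the
pattern, for illustration only and not an exact excerpt from this
change:

  // Before: unscoped, enumerators usable without qualification and
  // implicitly convertible to int.
  enum StorageType { UNSYMMETRIC, UPPER_TRIANGULAR, LOWER_TRIANGULAR };
  storage_type_ = UNSYMMETRIC;

  // After: scoped, callers must qualify with StorageType:: and
  // logging needs a hand-written operator<<.
  enum class StorageType { UNSYMMETRIC, UPPER_TRIANGULAR, LOWER_TRIANGULAR };
  storage_type_ = StorageType::UNSYMMETRIC;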

Change-Id: Ide89c7115c3b12c0f2452a2969dc5523b3a7970f
diff --git a/internal/ceres/accelerate_sparse.cc b/internal/ceres/accelerate_sparse.cc
index 53ba3a0..59d9dca 100644
--- a/internal/ceres/accelerate_sparse.cc
+++ b/internal/ceres/accelerate_sparse.cc
@@ -199,7 +199,7 @@
   CHECK_EQ(lhs->storage_type(), StorageType());
   if (lhs == nullptr) {
     *message = "Failure: Input lhs is nullptr.";
-    return LINEAR_SOLVER_FATAL_ERROR;
+    return LinearSolverTerminationType::FATAL_ERROR;
   }
   typename SparseTypesTrait<Scalar>::SparseMatrix as_lhs =
       as_.CreateSparseMatrixTransposeView(lhs);
@@ -213,7 +213,7 @@
           "Apple Accelerate Failure : Symbolic factorisation failed: %s",
           SparseStatusToString(symbolic_factor_->status));
       FreeSymbolicFactorization();
-      return LINEAR_SOLVER_FATAL_ERROR;
+      return LinearSolverTerminationType::FATAL_ERROR;
     }
   }
 
@@ -230,10 +230,10 @@
         "Apple Accelerate Failure : Numeric factorisation failed: %s",
         SparseStatusToString(numeric_factor_->status));
     FreeNumericFactorization();
-    return LINEAR_SOLVER_FAILURE;
+    return LinearSolverTerminationType::FAILURE;
   }
 
-  return LINEAR_SOLVER_SUCCESS;
+  return LinearSolverTerminationType::SUCCESS;
 }
 
 template <typename Scalar>
@@ -259,7 +259,7 @@
     VectorRef(solution, num_cols) =
         scalar_rhs_and_solution_.template cast<double>();
   }
-  return LINEAR_SOLVER_SUCCESS;
+  return LinearSolverTerminationType::SUCCESS;
 }
 
 template <typename Scalar>
diff --git a/internal/ceres/compressed_row_sparse_matrix.cc b/internal/ceres/compressed_row_sparse_matrix.cc
index 40f8d14..d663f4a 100644
--- a/internal/ceres/compressed_row_sparse_matrix.cc
+++ b/internal/ceres/compressed_row_sparse_matrix.cc
@@ -162,7 +162,7 @@
                                                      int max_num_nonzeros) {
   num_rows_ = num_rows;
   num_cols_ = num_cols;
-  storage_type_ = UNSYMMETRIC;
+  storage_type_ = StorageType::UNSYMMETRIC;
   rows_.resize(num_rows + 1, 0);
   cols_.resize(max_num_nonzeros, 0);
   values_.resize(max_num_nonzeros, 0.0);
@@ -254,7 +254,7 @@
 
   num_rows_ = num_rows;
   num_cols_ = num_rows;
-  storage_type_ = UNSYMMETRIC;
+  storage_type_ = StorageType::UNSYMMETRIC;
   rows_.resize(num_rows + 1);
   cols_.resize(num_rows);
   values_.resize(num_rows);
@@ -282,7 +282,7 @@
   CHECK(x != nullptr);
   CHECK(y != nullptr);
 
-  if (storage_type_ == UNSYMMETRIC) {
+  if (storage_type_ == StorageType::UNSYMMETRIC) {
     for (int r = 0; r < num_rows_; ++r) {
       for (int idx = rows_[r]; idx < rows_[r + 1]; ++idx) {
         const int c = cols_[idx];
@@ -290,7 +290,7 @@
         y[r] += v * x[c];
       }
     }
-  } else if (storage_type_ == UPPER_TRIANGULAR) {
+  } else if (storage_type_ == StorageType::UPPER_TRIANGULAR) {
     // Because of their block structure, we will have entries that lie
     // above (below) the diagonal for lower (upper) triangular matrices,
     // so the loops below need to account for this.
@@ -316,7 +316,7 @@
         }
       }
     }
-  } else if (storage_type_ == LOWER_TRIANGULAR) {
+  } else if (storage_type_ == StorageType::LOWER_TRIANGULAR) {
     for (int r = 0; r < num_rows_; ++r) {
       int idx = rows_[r];
       const int idx_end = rows_[r + 1];
@@ -343,7 +343,7 @@
   CHECK(x != nullptr);
   CHECK(y != nullptr);
 
-  if (storage_type_ == UNSYMMETRIC) {
+  if (storage_type_ == StorageType::UNSYMMETRIC) {
     for (int r = 0; r < num_rows_; ++r) {
       for (int idx = rows_[r]; idx < rows_[r + 1]; ++idx) {
         y[cols_[idx]] += values_[idx] * x[r];
@@ -359,11 +359,11 @@
   CHECK(x != nullptr);
 
   std::fill(x, x + num_cols_, 0.0);
-  if (storage_type_ == UNSYMMETRIC) {
+  if (storage_type_ == StorageType::UNSYMMETRIC) {
     for (int idx = 0; idx < rows_[num_rows_]; ++idx) {
       x[cols_[idx]] += values_[idx] * values_[idx];
     }
-  } else if (storage_type_ == UPPER_TRIANGULAR) {
+  } else if (storage_type_ == StorageType::UPPER_TRIANGULAR) {
     // Because of their block structure, we will have entries that lie
     // above (below) the diagonal for lower (upper) triangular
     // matrices, so the loops below need to account for this.
@@ -389,7 +389,7 @@
         }
       }
     }
-  } else if (storage_type_ == LOWER_TRIANGULAR) {
+  } else if (storage_type_ == StorageType::LOWER_TRIANGULAR) {
     for (int r = 0; r < num_rows_; ++r) {
       int idx = rows_[r];
       const int idx_end = rows_[r + 1];
@@ -434,7 +434,7 @@
 void CompressedRowSparseMatrix::DeleteRows(int delta_rows) {
   CHECK_GE(delta_rows, 0);
   CHECK_LE(delta_rows, num_rows_);
-  CHECK_EQ(storage_type_, UNSYMMETRIC);
+  CHECK_EQ(storage_type_, StorageType::UNSYMMETRIC);
 
   num_rows_ -= delta_rows;
   rows_.resize(num_rows_ + 1);
@@ -458,7 +458,7 @@
 }
 
 void CompressedRowSparseMatrix::AppendRows(const CompressedRowSparseMatrix& m) {
-  CHECK_EQ(storage_type_, UNSYMMETRIC);
+  CHECK_EQ(storage_type_, StorageType::UNSYMMETRIC);
   CHECK_EQ(m.num_cols(), num_cols_);
 
   CHECK((row_blocks_.empty() && m.row_blocks().empty()) ||
@@ -584,14 +584,14 @@
           num_cols_, num_rows_, num_nonzeros());
 
   switch (storage_type_) {
-    case UNSYMMETRIC:
-      transpose->set_storage_type(UNSYMMETRIC);
+    case StorageType::UNSYMMETRIC:
+      transpose->set_storage_type(StorageType::UNSYMMETRIC);
       break;
-    case LOWER_TRIANGULAR:
-      transpose->set_storage_type(UPPER_TRIANGULAR);
+    case StorageType::LOWER_TRIANGULAR:
+      transpose->set_storage_type(StorageType::UPPER_TRIANGULAR);
       break;
-    case UPPER_TRIANGULAR:
-      transpose->set_storage_type(LOWER_TRIANGULAR);
+    case StorageType::UPPER_TRIANGULAR:
+      transpose->set_storage_type(StorageType::LOWER_TRIANGULAR);
       break;
     default:
       LOG(FATAL) << "Unknown storage type: " << storage_type_;
@@ -626,7 +626,7 @@
   CHECK_GT(options.max_row_block_size, 0);
   CHECK_LE(options.min_row_block_size, options.max_row_block_size);
 
-  if (options.storage_type == UNSYMMETRIC) {
+  if (options.storage_type == StorageType::UNSYMMETRIC) {
     CHECK_GT(options.num_col_blocks, 0);
     CHECK_GT(options.min_col_block_size, 0);
     CHECK_GT(options.max_col_block_size, 0);
@@ -652,7 +652,7 @@
     row_blocks.push_back(options.min_row_block_size + delta_block_size);
   }
 
-  if (options.storage_type == UNSYMMETRIC) {
+  if (options.storage_type == StorageType::UNSYMMETRIC) {
     // Generate the col block structure.
     for (int i = 0; i < options.num_col_blocks; ++i) {
       // Generate a random integer in [min_col_block_size, max_col_block_size]
@@ -686,8 +686,10 @@
     for (int r = 0; r < options.num_row_blocks; ++r) {
       int col_block_begin = 0;
       for (int c = 0; c < options.num_col_blocks; ++c) {
-        if (((options.storage_type == UPPER_TRIANGULAR) && (r > c)) ||
-            ((options.storage_type == LOWER_TRIANGULAR) && (r < c))) {
+        if (((options.storage_type == StorageType::UPPER_TRIANGULAR) &&
+             (r > c)) ||
+            ((options.storage_type == StorageType::LOWER_TRIANGULAR) &&
+             (r < c))) {
           col_block_begin += col_blocks[c];
           continue;
         }
@@ -696,7 +698,7 @@
         if (RandDouble() <= options.block_density) {
           // If the matrix is symmetric, then we take care to generate
           // symmetric diagonal blocks.
-          if (options.storage_type == UNSYMMETRIC || r != c) {
+          if (options.storage_type == StorageType::UNSYMMETRIC || r != c) {
             AddRandomBlock(row_blocks[r],
                            col_blocks[c],
                            row_block_begin,
diff --git a/internal/ceres/compressed_row_sparse_matrix.h b/internal/ceres/compressed_row_sparse_matrix.h
index 522734c..1d1ac95 100644
--- a/internal/ceres/compressed_row_sparse_matrix.h
+++ b/internal/ceres/compressed_row_sparse_matrix.h
@@ -50,7 +50,7 @@
 
 class CERES_NO_EXPORT CompressedRowSparseMatrix : public SparseMatrix {
  public:
-  enum StorageType {
+  enum class StorageType {
     UNSYMMETRIC,
     // Matrix is assumed to be symmetric but only the lower triangular
     // part of the matrix is stored.
@@ -176,7 +176,7 @@
     // (lower triangular) part. In this case, num_col_blocks,
     // min_col_block_size and max_col_block_size will be ignored and
     // assumed to be equal to the corresponding row settings.
-    StorageType storage_type = UNSYMMETRIC;
+    StorageType storage_type = StorageType::UNSYMMETRIC;
 
     int num_row_blocks = 0;
     int min_row_block_size = 0;
@@ -217,6 +217,23 @@
   std::vector<int> col_blocks_;
 };
 
+inline std::ostream& operator<<(std::ostream& s,
+                                CompressedRowSparseMatrix::StorageType type) {
+  switch (type) {
+    case CompressedRowSparseMatrix::StorageType::UNSYMMETRIC:
+      s << "UNSYMMETRIC";
+      break;
+    case CompressedRowSparseMatrix::StorageType::UPPER_TRIANGULAR:
+      s << "UPPER_TRIANGULAR";
+      break;
+    case CompressedRowSparseMatrix::StorageType::LOWER_TRIANGULAR:
+      s << "LOWER_TRIANGULAR";
+      break;
+    default:
+      s << "UNKNOWN CompressedRowSparseMatrix::StorageType";
+  }
+  return s;
+}
 }  // namespace internal
 }  // namespace ceres
 
diff --git a/internal/ceres/compressed_row_sparse_matrix_test.cc b/internal/ceres/compressed_row_sparse_matrix_test.cc
index e898a5a..4151845 100644
--- a/internal/ceres/compressed_row_sparse_matrix_test.cc
+++ b/internal/ceres/compressed_row_sparse_matrix_test.cc
@@ -66,7 +66,6 @@
 
     a->RightMultiply(x.data(), y_a.data());
     b->RightMultiply(x.data(), y_b.data());
-
     EXPECT_EQ((y_a - y_b).norm(), 0);
   }
 }
@@ -385,12 +384,12 @@
 
 static std::string ParamInfoToString(testing::TestParamInfo<Param> info) {
   if (::testing::get<0>(info.param) ==
-      CompressedRowSparseMatrix::UPPER_TRIANGULAR) {
+      CompressedRowSparseMatrix::StorageType::UPPER_TRIANGULAR) {
     return "UPPER";
   }
 
   if (::testing::get<0>(info.param) ==
-      CompressedRowSparseMatrix::LOWER_TRIANGULAR) {
+      CompressedRowSparseMatrix::StorageType::LOWER_TRIANGULAR) {
     return "LOWER";
   }
 
@@ -435,10 +434,10 @@
       matrix->ToDenseMatrix(&dense);
       Vector expected_y;
       if (::testing::get<0>(param) ==
-          CompressedRowSparseMatrix::UPPER_TRIANGULAR) {
+          CompressedRowSparseMatrix::StorageType::UPPER_TRIANGULAR) {
         expected_y = dense.selfadjointView<Eigen::Upper>() * x;
       } else if (::testing::get<0>(param) ==
-                 CompressedRowSparseMatrix::LOWER_TRIANGULAR) {
+                 CompressedRowSparseMatrix::StorageType::LOWER_TRIANGULAR) {
         expected_y = dense.selfadjointView<Eigen::Lower>() * x;
       } else {
         expected_y = dense * x;
@@ -461,9 +460,9 @@
 INSTANTIATE_TEST_SUITE_P(
     CompressedRowSparseMatrix,
     RightMultiplyTest,
-    ::testing::Values(CompressedRowSparseMatrix::LOWER_TRIANGULAR,
-                      CompressedRowSparseMatrix::UPPER_TRIANGULAR,
-                      CompressedRowSparseMatrix::UNSYMMETRIC),
+    ::testing::Values(CompressedRowSparseMatrix::StorageType::LOWER_TRIANGULAR,
+                      CompressedRowSparseMatrix::StorageType::UPPER_TRIANGULAR,
+                      CompressedRowSparseMatrix::StorageType::UNSYMMETRIC),
     ParamInfoToString);
 
 class LeftMultiplyTest : public ::testing::TestWithParam<Param> {};
@@ -504,10 +503,10 @@
       matrix->ToDenseMatrix(&dense);
       Vector expected_y;
       if (::testing::get<0>(param) ==
-          CompressedRowSparseMatrix::UPPER_TRIANGULAR) {
+          CompressedRowSparseMatrix::StorageType::UPPER_TRIANGULAR) {
         expected_y = dense.selfadjointView<Eigen::Upper>() * x;
       } else if (::testing::get<0>(param) ==
-                 CompressedRowSparseMatrix::LOWER_TRIANGULAR) {
+                 CompressedRowSparseMatrix::StorageType::LOWER_TRIANGULAR) {
         expected_y = dense.selfadjointView<Eigen::Lower>() * x;
       } else {
         expected_y = dense.transpose() * x;
@@ -530,9 +529,9 @@
 INSTANTIATE_TEST_SUITE_P(
     CompressedRowSparseMatrix,
     LeftMultiplyTest,
-    ::testing::Values(CompressedRowSparseMatrix::LOWER_TRIANGULAR,
-                      CompressedRowSparseMatrix::UPPER_TRIANGULAR,
-                      CompressedRowSparseMatrix::UNSYMMETRIC),
+    ::testing::Values(CompressedRowSparseMatrix::StorageType::LOWER_TRIANGULAR,
+                      CompressedRowSparseMatrix::StorageType::UPPER_TRIANGULAR,
+                      CompressedRowSparseMatrix::StorageType::UNSYMMETRIC),
     ParamInfoToString);
 
 class SquaredColumnNormTest : public ::testing::TestWithParam<Param> {};
@@ -569,11 +568,11 @@
       matrix->ToDenseMatrix(&dense);
       Vector expected;
       if (::testing::get<0>(param) ==
-          CompressedRowSparseMatrix::UPPER_TRIANGULAR) {
+          CompressedRowSparseMatrix::StorageType::UPPER_TRIANGULAR) {
         const Matrix full = dense.selfadjointView<Eigen::Upper>();
         expected = full.colwise().squaredNorm();
       } else if (::testing::get<0>(param) ==
-                 CompressedRowSparseMatrix::LOWER_TRIANGULAR) {
+                 CompressedRowSparseMatrix::StorageType::LOWER_TRIANGULAR) {
         const Matrix full = dense.selfadjointView<Eigen::Lower>();
         expected = full.colwise().squaredNorm();
       } else {
@@ -595,9 +594,9 @@
 INSTANTIATE_TEST_SUITE_P(
     CompressedRowSparseMatrix,
     SquaredColumnNormTest,
-    ::testing::Values(CompressedRowSparseMatrix::LOWER_TRIANGULAR,
-                      CompressedRowSparseMatrix::UPPER_TRIANGULAR,
-                      CompressedRowSparseMatrix::UNSYMMETRIC),
+    ::testing::Values(CompressedRowSparseMatrix::StorageType::LOWER_TRIANGULAR,
+                      CompressedRowSparseMatrix::StorageType::UPPER_TRIANGULAR,
+                      CompressedRowSparseMatrix::StorageType::UNSYMMETRIC),
     ParamInfoToString);
 
 // TODO(sameeragarwal) Add tests for the random matrix creation methods.
diff --git a/internal/ceres/conjugate_gradients_solver.cc b/internal/ceres/conjugate_gradients_solver.cc
index 2888173..2a0c3ab 100644
--- a/internal/ceres/conjugate_gradients_solver.cc
+++ b/internal/ceres/conjugate_gradients_solver.cc
@@ -71,7 +71,7 @@
   CHECK_EQ(A->num_rows(), A->num_cols());
 
   LinearSolver::Summary summary;
-  summary.termination_type = LINEAR_SOLVER_NO_CONVERGENCE;
+  summary.termination_type = LinearSolverTerminationType::NO_CONVERGENCE;
   summary.message = "Maximum number of iterations reached.";
   summary.num_iterations = 0;
 
@@ -82,7 +82,7 @@
   const double norm_b = bref.norm();
   if (norm_b == 0.0) {
     xref.setZero();
-    summary.termination_type = LINEAR_SOLVER_SUCCESS;
+    summary.termination_type = LinearSolverTerminationType::SUCCESS;
     summary.message = "Convergence. |b| = 0.";
     return summary;
   }
@@ -99,7 +99,7 @@
   r = bref - tmp;
   double norm_r = r.norm();
   if (options_.min_num_iterations == 0 && norm_r <= tol_r) {
-    summary.termination_type = LINEAR_SOLVER_SUCCESS;
+    summary.termination_type = LinearSolverTerminationType::SUCCESS;
     summary.message =
         StringPrintf("Convergence. |r| = %e <= %e.", norm_r, tol_r);
     return summary;
@@ -122,7 +122,7 @@
     double last_rho = rho;
     rho = r.dot(z);
     if (IsZeroOrInfinity(rho)) {
-      summary.termination_type = LINEAR_SOLVER_FAILURE;
+      summary.termination_type = LinearSolverTerminationType::FAILURE;
       summary.message = StringPrintf("Numerical failure. rho = r'z = %e.", rho);
       break;
     }
@@ -132,7 +132,7 @@
     } else {
       double beta = rho / last_rho;
       if (IsZeroOrInfinity(beta)) {
-        summary.termination_type = LINEAR_SOLVER_FAILURE;
+        summary.termination_type = LinearSolverTerminationType::FAILURE;
         summary.message = StringPrintf(
             "Numerical failure. beta = rho_n / rho_{n-1} = %e, "
             "rho_n = %e, rho_{n-1} = %e",
@@ -149,7 +149,7 @@
     A->RightMultiply(p.data(), q.data());
     const double pq = p.dot(q);
     if ((pq <= 0) || std::isinf(pq)) {
-      summary.termination_type = LINEAR_SOLVER_NO_CONVERGENCE;
+      summary.termination_type = LinearSolverTerminationType::NO_CONVERGENCE;
       summary.message = StringPrintf(
           "Matrix is indefinite, no more progress can be made. "
           "p'q = %e. |p| = %e, |q| = %e",
@@ -161,7 +161,7 @@
 
     const double alpha = rho / pq;
     if (std::isinf(alpha)) {
-      summary.termination_type = LINEAR_SOLVER_FAILURE;
+      summary.termination_type = LinearSolverTerminationType::FAILURE;
       summary.message = StringPrintf(
           "Numerical failure. alpha = rho / pq = %e, rho = %e, pq = %e.",
           alpha,
@@ -216,7 +216,7 @@
     const double zeta = summary.num_iterations * (Q1 - Q0) / Q1;
     if (zeta < per_solve_options.q_tolerance &&
         summary.num_iterations >= options_.min_num_iterations) {
-      summary.termination_type = LINEAR_SOLVER_SUCCESS;
+      summary.termination_type = LinearSolverTerminationType::SUCCESS;
       summary.message =
           StringPrintf("Iteration: %d Convergence: zeta = %e < %e. |r| = %e",
                        summary.num_iterations,
@@ -231,7 +231,7 @@
     norm_r = r.norm();
     if (norm_r <= tol_r &&
         summary.num_iterations >= options_.min_num_iterations) {
-      summary.termination_type = LINEAR_SOLVER_SUCCESS;
+      summary.termination_type = LinearSolverTerminationType::SUCCESS;
       summary.message =
           StringPrintf("Iteration: %d Convergence. |r| = %e <= %e.",
                        summary.num_iterations,
diff --git a/internal/ceres/conjugate_gradients_solver_test.cc b/internal/ceres/conjugate_gradients_solver_test.cc
index badad04..b27fee0 100644
--- a/internal/ceres/conjugate_gradients_solver_test.cc
+++ b/internal/ceres/conjugate_gradients_solver_test.cc
@@ -68,7 +68,7 @@
   LinearSolver::Summary summary =
       solver.Solve(A.get(), b.data(), per_solve_options, x.data());
 
-  EXPECT_EQ(summary.termination_type, LINEAR_SOLVER_SUCCESS);
+  EXPECT_EQ(summary.termination_type, LinearSolverTerminationType::SUCCESS);
   ASSERT_EQ(summary.num_iterations, 1);
 
   ASSERT_DOUBLE_EQ(1, x(0));
@@ -124,7 +124,7 @@
   LinearSolver::Summary summary =
       solver.Solve(A.get(), b.data(), per_solve_options, x.data());
 
-  EXPECT_EQ(summary.termination_type, LINEAR_SOLVER_SUCCESS);
+  EXPECT_EQ(summary.termination_type, LinearSolverTerminationType::SUCCESS);
 
   ASSERT_DOUBLE_EQ(0, x(0));
   ASSERT_DOUBLE_EQ(1, x(1));
diff --git a/internal/ceres/cuda_dense_cholesky_test.cc b/internal/ceres/cuda_dense_cholesky_test.cc
index 24db09a..13dc34b 100644
--- a/internal/ceres/cuda_dense_cholesky_test.cc
+++ b/internal/ceres/cuda_dense_cholesky_test.cc
@@ -68,10 +68,10 @@
   ASSERT_NE(dense_cuda_solver, nullptr);
   std::string error_string;
   ASSERT_EQ(dense_cuda_solver->Factorize(A.cols(), A.data(), &error_string),
-            LinearSolverTerminationType::LINEAR_SOLVER_SUCCESS);
+            LinearSolverTerminationType::SUCCESS);
   Eigen::Vector4d x = Eigen::Vector4d::Zero();
   ASSERT_EQ(dense_cuda_solver->Solve(b.data(), x.data(), &error_string),
-            LinearSolverTerminationType::LINEAR_SOLVER_SUCCESS);
+            LinearSolverTerminationType::SUCCESS);
   EXPECT_NEAR(x(0), 113.75 / 3.0, std::numeric_limits<double>::epsilon() * 10);
   EXPECT_NEAR(x(1), -31.0 / 3.0, std::numeric_limits<double>::epsilon() * 10);
   EXPECT_NEAR(x(2), 5.0 / 3.0, std::numeric_limits<double>::epsilon() * 10);
@@ -95,7 +95,7 @@
   ASSERT_NE(dense_cuda_solver, nullptr);
   std::string error_string;
   ASSERT_EQ(dense_cuda_solver->Factorize(A.cols(), A.data(), &error_string),
-            LinearSolverTerminationType::LINEAR_SOLVER_FAILURE);
+            LinearSolverTerminationType::FAILURE);
 }
 
 TEST(CUDADenseCholesky, NegativeMatrix) {
@@ -115,7 +115,7 @@
   ASSERT_NE(dense_cuda_solver, nullptr);
   std::string error_string;
   ASSERT_EQ(dense_cuda_solver->Factorize(A.cols(), A.data(), &error_string),
-            LinearSolverTerminationType::LINEAR_SOLVER_FAILURE);
+            LinearSolverTerminationType::FAILURE);
 }
 
 TEST(CUDADenseCholesky, MustFactorizeBeforeSolve) {
@@ -128,7 +128,7 @@
   ASSERT_NE(dense_cuda_solver, nullptr);
   std::string error_string;
   ASSERT_EQ(dense_cuda_solver->Solve(b.data(), nullptr, &error_string),
-            LinearSolverTerminationType::LINEAR_SOLVER_FATAL_ERROR);
+            LinearSolverTerminationType::FATAL_ERROR);
 }
 
 TEST(CUDADenseCholesky, Randomized1600x1600Tests) {
@@ -164,7 +164,7 @@
     LinearSolver::Summary summary;
     summary.termination_type = dense_cholesky->FactorAndSolve(
         kNumCols, lhs.data(), rhs.data(), x_computed.data(), &summary.message);
-    ASSERT_EQ(summary.termination_type, LINEAR_SOLVER_SUCCESS);
+    ASSERT_EQ(summary.termination_type, LinearSolverTerminationType::SUCCESS);
     ASSERT_NEAR(
         (x_computed - x_expected).norm() / x_expected.norm(), 0.0, 1e-10);
   }
diff --git a/internal/ceres/cuda_dense_qr_test.cc b/internal/ceres/cuda_dense_qr_test.cc
index 4df79bd..9eb5d4d 100644
--- a/internal/ceres/cuda_dense_qr_test.cc
+++ b/internal/ceres/cuda_dense_qr_test.cc
@@ -64,10 +64,10 @@
   std::string error_string;
   ASSERT_EQ(
       dense_cuda_solver->Factorize(A.rows(), A.cols(), A.data(), &error_string),
-      LinearSolverTerminationType::LINEAR_SOLVER_SUCCESS);
+      LinearSolverTerminationType::SUCCESS);
   Eigen::Vector4d x = Eigen::Vector4d::Zero();
   ASSERT_EQ(dense_cuda_solver->Solve(b.data(), x.data(), &error_string),
-            LinearSolverTerminationType::LINEAR_SOLVER_SUCCESS);
+            LinearSolverTerminationType::SUCCESS);
   // Empirically observed accuracy of cuSolverDN's QR solver.
   const double kEpsilon = 1e-11;
   EXPECT_NEAR(x(0), 113.75 / 3.0, kEpsilon);
@@ -96,10 +96,10 @@
   std::string error_string;
   ASSERT_EQ(
       dense_cuda_solver->Factorize(A.rows(), A.cols(), A.data(), &error_string),
-      LinearSolverTerminationType::LINEAR_SOLVER_SUCCESS);
+      LinearSolverTerminationType::SUCCESS);
   std::vector<double> x(2, 0);
   ASSERT_EQ(dense_cuda_solver->Solve(b.data(), x.data(), &error_string),
-            LinearSolverTerminationType::LINEAR_SOLVER_SUCCESS);
+            LinearSolverTerminationType::SUCCESS);
   // Empirically observed accuracy of cuSolverDN's QR solver.
   const double kEpsilon = 1e-11;
   // Solution values computed with Octave.
@@ -117,7 +117,7 @@
   ASSERT_NE(dense_cuda_solver, nullptr);
   std::string error_string;
   ASSERT_EQ(dense_cuda_solver->Solve(b.data(), nullptr, &error_string),
-            LinearSolverTerminationType::LINEAR_SOLVER_FATAL_ERROR);
+            LinearSolverTerminationType::FATAL_ERROR);
 }
 
 TEST(CUDADenseQR, Randomized1600x100Tests) {
@@ -155,7 +155,7 @@
                                                         rhs.data(),
                                                         x_computed.data(),
                                                         &summary.message);
-    ASSERT_EQ(summary.termination_type, LINEAR_SOLVER_SUCCESS);
+    ASSERT_EQ(summary.termination_type, LinearSolverTerminationType::SUCCESS);
     ASSERT_NEAR((x_computed - x_expected).norm() / x_expected.norm(),
                 0.0,
                 std::numeric_limits<double>::epsilon() * 400);
diff --git a/internal/ceres/cxsparse.cc b/internal/ceres/cxsparse.cc
index 9dd797f..46766ac 100644
--- a/internal/ceres/cxsparse.cc
+++ b/internal/ceres/cxsparse.cc
@@ -7,7 +7,7 @@
 //
 // * Redistributions of source code must retain the above copyright notice,
 //   this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above copyright notice,
+// * Redistributions in binary form must reproduce the above copyright notice,
 //   this list of conditions and the following disclaimer in the documentation
 //   and/or other materials provided with the distribution.
 // * Neither the name of Google Inc. nor the names of its contributors may be
@@ -201,7 +201,7 @@
 }
 
 CompressedRowSparseMatrix::StorageType CXSparseCholesky::StorageType() const {
-  return CompressedRowSparseMatrix::LOWER_TRIANGULAR;
+  return CompressedRowSparseMatrix::StorageType::LOWER_TRIANGULAR;
 }
 
 CXSparseCholesky::CXSparseCholesky(const OrderingType ordering_type)
@@ -219,13 +219,13 @@
   CHECK_EQ(lhs->storage_type(), StorageType());
   if (lhs == nullptr) {
     *message = "Failure: Input lhs is nullptr.";
-    return LINEAR_SOLVER_FATAL_ERROR;
+    return LinearSolverTerminationType::FATAL_ERROR;
   }
 
   cs_di cs_lhs = cs_.CreateSparseMatrixTransposeView(lhs);
 
   if (symbolic_factor_ == nullptr) {
-    if (ordering_type_ == NATURAL) {
+    if (ordering_type_ == OrderingType::NATURAL) {
       symbolic_factor_ = cs_.AnalyzeCholeskyWithNaturalOrdering(&cs_lhs);
     } else {
       if (!lhs->col_blocks().empty() && !(lhs->row_blocks().empty())) {
@@ -238,7 +238,7 @@
 
     if (symbolic_factor_ == nullptr) {
       *message = "CXSparse Failure : Symbolic factorization failed.";
-      return LINEAR_SOLVER_FATAL_ERROR;
+      return LinearSolverTerminationType::FATAL_ERROR;
     }
   }
 
@@ -246,10 +246,10 @@
   numeric_factor_ = cs_.Cholesky(&cs_lhs, symbolic_factor_);
   if (numeric_factor_ == nullptr) {
     *message = "CXSparse Failure : Numeric factorization failed.";
-    return LINEAR_SOLVER_FAILURE;
+    return LinearSolverTerminationType::FAILURE;
   }
 
-  return LINEAR_SOLVER_SUCCESS;
+  return LinearSolverTerminationType::SUCCESS;
 }
 
 LinearSolverTerminationType CXSparseCholesky::Solve(const double* rhs,
@@ -260,7 +260,7 @@
   const int num_cols = numeric_factor_->L->n;
   memcpy(solution, rhs, num_cols * sizeof(*solution));
   cs_.Solve(symbolic_factor_, numeric_factor_, solution);
-  return LINEAR_SOLVER_SUCCESS;
+  return LinearSolverTerminationType::SUCCESS;
 }
 
 void CXSparseCholesky::FreeSymbolicFactorization() {
diff --git a/internal/ceres/dense_cholesky.cc b/internal/ceres/dense_cholesky.cc
index bd20e8c..a10f311 100644
--- a/internal/ceres/dense_cholesky.cc
+++ b/internal/ceres/dense_cholesky.cc
@@ -104,7 +104,7 @@
     std::string* message) {
   LinearSolverTerminationType termination_type =
       Factorize(num_cols, lhs, message);
-  if (termination_type == LINEAR_SOLVER_SUCCESS) {
+  if (termination_type == LinearSolverTerminationType::SUCCESS) {
     termination_type = Solve(rhs, solution, message);
   }
   return termination_type;
@@ -116,11 +116,11 @@
   llt_ = std::make_unique<LLTType>(m);
   if (llt_->info() != Eigen::Success) {
     *message = "Eigen failure. Unable to perform dense Cholesky factorization.";
-    return LINEAR_SOLVER_FAILURE;
+    return LinearSolverTerminationType::FAILURE;
   }
 
   *message = "Success.";
-  return LINEAR_SOLVER_SUCCESS;
+  return LinearSolverTerminationType::SUCCESS;
 }
 
 LinearSolverTerminationType EigenDenseCholesky::Solve(const double* rhs,
@@ -128,13 +128,13 @@
                                                       std::string* message) {
   if (llt_->info() != Eigen::Success) {
     *message = "Eigen failure. Unable to perform dense Cholesky factorization.";
-    return LINEAR_SOLVER_FAILURE;
+    return LinearSolverTerminationType::FAILURE;
   }
 
   VectorRef(solution, llt_->cols()) =
       llt_->solve(ConstVectorRef(rhs, llt_->cols()));
   *message = "Success.";
-  return LINEAR_SOLVER_SUCCESS;
+  return LinearSolverTerminationType::SUCCESS;
 }
 
 #ifndef CERES_NO_LAPACK
@@ -148,19 +148,19 @@
   dpotrf_(&uplo, &num_cols_, lhs_, &num_cols_, &info);
 
   if (info < 0) {
-    termination_type_ = LINEAR_SOLVER_FATAL_ERROR;
+    termination_type_ = LinearSolverTerminationType::FATAL_ERROR;
     LOG(FATAL) << "Congratulations, you found a bug in Ceres. "
                << "Please report it. "
                << "LAPACK::dpotrf fatal error. "
                << "Argument: " << -info << " is invalid.";
   } else if (info > 0) {
-    termination_type_ = LINEAR_SOLVER_FAILURE;
+    termination_type_ = LinearSolverTerminationType::FAILURE;
     *message = StringPrintf(
         "LAPACK::dpotrf numerical failure. "
         "The leading minor of order %d is not positive definite.",
         info);
   } else {
-    termination_type_ = LINEAR_SOLVER_SUCCESS;
+    termination_type_ = LinearSolverTerminationType::SUCCESS;
     *message = "Success.";
   }
   return termination_type_;
@@ -178,7 +178,7 @@
       &uplo, &num_cols_, &nrhs, lhs_, &num_cols_, solution, &num_cols_, &info);
 
   if (info < 0) {
-    termination_type_ = LINEAR_SOLVER_FATAL_ERROR;
+    termination_type_ = LinearSolverTerminationType::FATAL_ERROR;
     LOG(FATAL) << "Congratulations, you found a bug in Ceres. "
                << "Please report it. "
                << "LAPACK::dpotrs fatal error. "
@@ -186,7 +186,7 @@
   }
 
   *message = "Success";
-  termination_type_ = LINEAR_SOLVER_SUCCESS;
+  termination_type_ = LinearSolverTerminationType::SUCCESS;
 
   return termination_type_;
 }
@@ -209,7 +209,7 @@
 LinearSolverTerminationType CUDADenseCholesky::Factorize(int num_cols,
                                                          double* lhs,
                                                          std::string* message) {
-  factorize_result_ = LinearSolverTerminationType::LINEAR_SOLVER_FATAL_ERROR;
+  factorize_result_ = LinearSolverTerminationType::FATAL_ERROR;
   lhs_.Reserve(num_cols * num_cols);
   num_cols_ = num_cols;
   lhs_.CopyToGpuAsync(lhs, num_cols * num_cols, stream_);
@@ -222,7 +222,7 @@
                                   &device_workspace_size) !=
       CUSOLVER_STATUS_SUCCESS) {
     *message = "cuSolverDN::cusolverDnDpotrf_bufferSize failed.";
-    return LinearSolverTerminationType::LINEAR_SOLVER_FATAL_ERROR;
+    return LinearSolverTerminationType::FATAL_ERROR;
   }
   device_workspace_.Reserve(device_workspace_size);
   if (cusolverDnDpotrf(cusolver_handle_,
@@ -234,12 +234,12 @@
                        device_workspace_.size(),
                        error_.data()) != CUSOLVER_STATUS_SUCCESS) {
     *message = "cuSolverDN::cusolverDnDpotrf failed.";
-    return LinearSolverTerminationType::LINEAR_SOLVER_FATAL_ERROR;
+    return LinearSolverTerminationType::FATAL_ERROR;
   }
   if (cudaDeviceSynchronize() != cudaSuccess ||
       cudaStreamSynchronize(stream_) != cudaSuccess) {
     *message = "Cuda device synchronization failed.";
-    return LinearSolverTerminationType::LINEAR_SOLVER_FATAL_ERROR;
+    return LinearSolverTerminationType::FATAL_ERROR;
   }
   int error = 0;
   error_.CopyToHost(&error, 1);
@@ -250,24 +250,24 @@
                << "Argument: " << -error << " is invalid.";
     // The following line is unreachable, but return failure just to be
     // pedantic, since the compiler does not know that.
-    return LinearSolverTerminationType::LINEAR_SOLVER_FATAL_ERROR;
+    return LinearSolverTerminationType::FATAL_ERROR;
   } else if (error > 0) {
     *message = StringPrintf(
         "cuSolverDN::cusolverDnDpotrf numerical failure. "
         "The leading minor of order %d is not positive definite.",
         error);
-    factorize_result_ = LinearSolverTerminationType::LINEAR_SOLVER_FAILURE;
-    return LinearSolverTerminationType::LINEAR_SOLVER_FAILURE;
+    factorize_result_ = LinearSolverTerminationType::FAILURE;
+    return LinearSolverTerminationType::FAILURE;
   }
   *message = "Success";
-  factorize_result_ = LinearSolverTerminationType::LINEAR_SOLVER_SUCCESS;
-  return LinearSolverTerminationType::LINEAR_SOLVER_SUCCESS;
+  factorize_result_ = LinearSolverTerminationType::SUCCESS;
+  return LinearSolverTerminationType::SUCCESS;
 }
 
 LinearSolverTerminationType CUDADenseCholesky::Solve(const double* rhs,
                                                      double* solution,
                                                      std::string* message) {
-  if (factorize_result_ != LinearSolverTerminationType::LINEAR_SOLVER_SUCCESS) {
+  if (factorize_result_ != LinearSolverTerminationType::SUCCESS) {
     *message = "Factorize did not complete successfully previously.";
     return factorize_result_;
   }
@@ -282,12 +282,12 @@
                        num_cols_,
                        error_.data()) != CUSOLVER_STATUS_SUCCESS) {
     *message = "cuSolverDN::cusolverDnDpotrs failed.";
-    return LinearSolverTerminationType::LINEAR_SOLVER_FATAL_ERROR;
+    return LinearSolverTerminationType::FATAL_ERROR;
   }
   if (cudaDeviceSynchronize() != cudaSuccess ||
       cudaStreamSynchronize(stream_) != cudaSuccess) {
     *message = "Cuda device synchronization failed.";
-    return LinearSolverTerminationType::LINEAR_SOLVER_FATAL_ERROR;
+    return LinearSolverTerminationType::FATAL_ERROR;
   }
   int error = 0;
   error_.CopyToHost(&error, 1);
@@ -299,7 +299,7 @@
   }
   rhs_.CopyToHost(solution, num_cols_);
   *message = "Success";
-  return LinearSolverTerminationType::LINEAR_SOLVER_SUCCESS;
+  return LinearSolverTerminationType::SUCCESS;
 }
 
 std::unique_ptr<CUDADenseCholesky> CUDADenseCholesky::Create(
diff --git a/internal/ceres/dense_cholesky.h b/internal/ceres/dense_cholesky.h
index d561079..cc8642c 100644
--- a/internal/ceres/dense_cholesky.h
+++ b/internal/ceres/dense_cholesky.h
@@ -87,7 +87,7 @@
                                             std::string* message) = 0;
 
   // Convenience method which combines a call to Factorize and Solve. Solve is
-  // only called if Factorize returns LINEAR_SOLVER_SUCCESS.
+  // only called if Factorize returns LinearSolverTerminationType::SUCCESS.
   //
   // The input matrix lhs may be modified by the implementation to store the
   // factorization, irrespective of whether the method succeeds or not. It is
@@ -127,7 +127,8 @@
  private:
   double* lhs_ = nullptr;
   int num_cols_ = -1;
-  LinearSolverTerminationType termination_type_ = LINEAR_SOLVER_FATAL_ERROR;
+  LinearSolverTerminationType termination_type_ =
+      LinearSolverTerminationType::FATAL_ERROR;
 };
 #endif  // CERES_NO_LAPACK
 
@@ -171,7 +172,8 @@
   CudaBuffer<int> error_;
   // Cache the result of Factorize to ensure that when Solve is called, the
   // factorization of lhs is valid.
-  LinearSolverTerminationType factorize_result_ = LINEAR_SOLVER_FATAL_ERROR;
+  LinearSolverTerminationType factorize_result_ =
+      LinearSolverTerminationType::FATAL_ERROR;
 };
 
 #endif  // CERES_NO_CUDA
diff --git a/internal/ceres/dense_cholesky_test.cc b/internal/ceres/dense_cholesky_test.cc
index 59ee9f7..7866d5c 100644
--- a/internal/ceres/dense_cholesky_test.cc
+++ b/internal/ceres/dense_cholesky_test.cc
@@ -87,7 +87,7 @@
       LinearSolver::Summary summary;
       summary.termination_type = dense_cholesky->FactorAndSolve(
           num_cols, lhs.data(), rhs.data(), actual.data(), &summary.message);
-      EXPECT_EQ(summary.termination_type, LINEAR_SOLVER_SUCCESS);
+      EXPECT_EQ(summary.termination_type, LinearSolverTerminationType::SUCCESS);
       EXPECT_NEAR((x - actual).norm() / x.norm(),
                   0.0,
                   std::numeric_limits<double>::epsilon() * 10)
diff --git a/internal/ceres/dense_linear_solver_test.cc b/internal/ceres/dense_linear_solver_test.cc
index 8c662cd..4d3f8f6 100644
--- a/internal/ceres/dense_linear_solver_test.cc
+++ b/internal/ceres/dense_linear_solver_test.cc
@@ -86,7 +86,7 @@
   Vector solution(num_cols);
   LinearSolver::Summary summary =
       solver->Solve(&lhs, rhs.data(), per_solve_options, solution.data());
-  EXPECT_EQ(summary.termination_type, LINEAR_SOLVER_SUCCESS);
+  EXPECT_EQ(summary.termination_type, LinearSolverTerminationType::SUCCESS);
 
   Vector normal_rhs = lhs.matrix().transpose() * rhs.head(num_rows);
   Matrix normal_lhs = lhs.matrix().transpose() * lhs.matrix();
diff --git a/internal/ceres/dense_qr.cc b/internal/ceres/dense_qr.cc
index b2d9bb8..cbe7533 100644
--- a/internal/ceres/dense_qr.cc
+++ b/internal/ceres/dense_qr.cc
@@ -152,7 +152,7 @@
                                                     std::string* message) {
   LinearSolverTerminationType termination_type =
       Factorize(num_rows, num_cols, lhs, message);
-  if (termination_type == LINEAR_SOLVER_SUCCESS) {
+  if (termination_type == LinearSolverTerminationType::SUCCESS) {
     termination_type = Solve(rhs, solution, message);
   }
   return termination_type;
@@ -165,7 +165,7 @@
   Eigen::Map<ColMajorMatrix> m(lhs, num_rows, num_cols);
   qr_ = std::make_unique<QRType>(m);
   *message = "Success.";
-  return LINEAR_SOLVER_SUCCESS;
+  return LinearSolverTerminationType::SUCCESS;
 }
 
 LinearSolverTerminationType EigenDenseQR::Solve(const double* rhs,
@@ -174,7 +174,7 @@
   VectorRef(solution, qr_->cols()) =
       qr_->solve(ConstVectorRef(rhs, qr_->rows()));
   *message = "Success.";
-  return LINEAR_SOLVER_SUCCESS;
+  return LinearSolverTerminationType::SUCCESS;
 }
 
 #ifndef CERES_NO_LAPACK
@@ -236,7 +236,7 @@
                << "Argument: " << -info << " is invalid.";
   }
 
-  termination_type_ = LINEAR_SOLVER_SUCCESS;
+  termination_type_ = LinearSolverTerminationType::SUCCESS;
   *message = "Success.";
   return termination_type_;
 }
@@ -244,7 +244,7 @@
 LinearSolverTerminationType LAPACKDenseQR::Solve(const double* rhs,
                                                  double* solution,
                                                  std::string* message) {
-  if (termination_type_ != LINEAR_SOLVER_SUCCESS) {
+  if (termination_type_ != LinearSolverTerminationType::SUCCESS) {
     *message = "QR factorization failed and solve called.";
     return termination_type_;
   }
@@ -297,10 +297,10 @@
     *message =
         "QR factorization failure. The factorization is not full rank. R has "
         "zeros on the diagonal.";
-    termination_type_ = LINEAR_SOLVER_FAILURE;
+    termination_type_ = LinearSolverTerminationType::FAILURE;
   } else {
     std::copy_n(q_transpose_rhs_.data(), num_cols_, solution);
-    termination_type_ = LINEAR_SOLVER_SUCCESS;
+    termination_type_ = LinearSolverTerminationType::SUCCESS;
   }
 
   return termination_type_;
@@ -326,7 +326,7 @@
                                                    int num_cols,
                                                    double* lhs,
                                                    std::string* message) {
-  factorize_result_ = LinearSolverTerminationType::LINEAR_SOLVER_FATAL_ERROR;
+  factorize_result_ = LinearSolverTerminationType::FATAL_ERROR;
   lhs_.Reserve(num_rows * num_cols);
   tau_.Reserve(std::min(num_rows, num_cols));
   num_rows_ = num_rows;
@@ -341,7 +341,7 @@
                                   &device_workspace_size) !=
       CUSOLVER_STATUS_SUCCESS) {
     *message = "cuSolverDN::cusolverDnDgeqrf_bufferSize failed.";
-    return LinearSolverTerminationType::LINEAR_SOLVER_FATAL_ERROR;
+    return LinearSolverTerminationType::FATAL_ERROR;
   }
   device_workspace_.Reserve(device_workspace_size);
   if (cusolverDnDgeqrf(cusolver_handle_,
@@ -354,12 +354,12 @@
                        device_workspace_.size(),
                        error_.data()) != CUSOLVER_STATUS_SUCCESS) {
     *message = "cuSolverDN::cusolverDnDgeqrf failed.";
-    return LinearSolverTerminationType::LINEAR_SOLVER_FATAL_ERROR;
+    return LinearSolverTerminationType::FATAL_ERROR;
   }
   if (cudaDeviceSynchronize() != cudaSuccess ||
       cudaStreamSynchronize(stream_) != cudaSuccess) {
     *message = "Cuda device synchronization failed.";
-    return LinearSolverTerminationType::LINEAR_SOLVER_FATAL_ERROR;
+    return LinearSolverTerminationType::FATAL_ERROR;
   }
   int error = 0;
   error_.CopyToHost(&error, 1);
@@ -370,18 +370,18 @@
                << "Argument: " << -error << " is invalid.";
     // The following line is unreachable, but return failure just to be
     // pedantic, since the compiler does not know that.
-    return LinearSolverTerminationType::LINEAR_SOLVER_FATAL_ERROR;
+    return LinearSolverTerminationType::FATAL_ERROR;
   }
 
   *message = "Success";
-  factorize_result_ = LinearSolverTerminationType::LINEAR_SOLVER_SUCCESS;
-  return LinearSolverTerminationType::LINEAR_SOLVER_SUCCESS;
+  factorize_result_ = LinearSolverTerminationType::SUCCESS;
+  return LinearSolverTerminationType::SUCCESS;
 }
 
 LinearSolverTerminationType CUDADenseQR::Solve(const double* rhs,
                                                double* solution,
                                                std::string* message) {
-  if (factorize_result_ != LinearSolverTerminationType::LINEAR_SOLVER_SUCCESS) {
+  if (factorize_result_ != LinearSolverTerminationType::SUCCESS) {
     *message = "Factorize did not complete successfully previously.";
     return factorize_result_;
   }
@@ -401,7 +401,7 @@
                                   &device_workspace_size) !=
       CUSOLVER_STATUS_SUCCESS) {
     *message = "cuSolverDN::cusolverDnDormqr_bufferSize failed.";
-    return LinearSolverTerminationType::LINEAR_SOLVER_FATAL_ERROR;
+    return LinearSolverTerminationType::FATAL_ERROR;
   }
   device_workspace_.Reserve(device_workspace_size);
   // Compute rhs = Q^T * rhs, assuming that lhs has already been factorized.
@@ -421,7 +421,7 @@
                        device_workspace_.size(),
                        error_.data()) != CUSOLVER_STATUS_SUCCESS) {
     *message = "cuSolverDN::cusolverDnDormqr failed.";
-    return LinearSolverTerminationType::LINEAR_SOLVER_FATAL_ERROR;
+    return LinearSolverTerminationType::FATAL_ERROR;
   }
   int error = 0;
   error_.CopyToHost(&error, 1);
@@ -443,16 +443,16 @@
                   rhs_.data(),
                   1) != CUBLAS_STATUS_SUCCESS) {
     *message = "cuBLAS::cublasDtrsv failed.";
-    return LinearSolverTerminationType::LINEAR_SOLVER_FATAL_ERROR;
+    return LinearSolverTerminationType::FATAL_ERROR;
   }
   if (cudaDeviceSynchronize() != cudaSuccess ||
       cudaStreamSynchronize(stream_) != cudaSuccess) {
     *message = "Cuda device synchronization failed.";
-    return LinearSolverTerminationType::LINEAR_SOLVER_FATAL_ERROR;
+    return LinearSolverTerminationType::FATAL_ERROR;
   }
   rhs_.CopyToHost(solution, num_cols_);
   *message = "Success";
-  return LinearSolverTerminationType::LINEAR_SOLVER_SUCCESS;
+  return LinearSolverTerminationType::SUCCESS;
 }
 
 std::unique_ptr<CUDADenseQR> CUDADenseQR::Create(
diff --git a/internal/ceres/dense_qr.h b/internal/ceres/dense_qr.h
index 6992563..0d2577a 100644
--- a/internal/ceres/dense_qr.h
+++ b/internal/ceres/dense_qr.h
@@ -91,7 +91,7 @@
                                             std::string* message) = 0;
 
   // Convenience method which combines a call to Factorize and Solve. Solve is
-  // only called if Factorize returns LINEAR_SOLVER_SUCCESS.
+  // only called if Factorize returns LinearSolverTerminationType::SUCCESS.
   //
   // The input matrix lhs may be modified by the implementation to store the
   // factorization, irrespective of whether the method succeeds or not. It is
@@ -135,7 +135,8 @@
   double* lhs_ = nullptr;
   int num_rows_;
   int num_cols_;
-  LinearSolverTerminationType termination_type_ = LINEAR_SOLVER_FATAL_ERROR;
+  LinearSolverTerminationType termination_type_ =
+      LinearSolverTerminationType::FATAL_ERROR;
   Vector work_;
   Vector tau_;
   Vector q_transpose_rhs_;
@@ -193,7 +194,8 @@
   CudaBuffer<int> error_;
   // Cache the result of Factorize to ensure that when Solve is called, the
   // factorization of lhs is valid.
-  LinearSolverTerminationType factorize_result_ = LINEAR_SOLVER_FATAL_ERROR;
+  LinearSolverTerminationType factorize_result_ =
+      LinearSolverTerminationType::FATAL_ERROR;
 };
 
 #endif  // CERES_NO_CUDA
diff --git a/internal/ceres/dense_qr_test.cc b/internal/ceres/dense_qr_test.cc
index 6c0ad31..acef750 100644
--- a/internal/ceres/dense_qr_test.cc
+++ b/internal/ceres/dense_qr_test.cc
@@ -93,7 +93,8 @@
                                                             rhs.data(),
                                                             actual.data(),
                                                             &summary.message);
-        ASSERT_EQ(summary.termination_type, LINEAR_SOLVER_SUCCESS);
+        ASSERT_EQ(summary.termination_type,
+                  LinearSolverTerminationType::SUCCESS);
         ASSERT_NEAR((x - actual).norm() / x.norm(), 0.0, kEpsilon)
             << "\nexpected: " << x.transpose()
             << "\nactual  : " << actual.transpose();
diff --git a/internal/ceres/dogleg_strategy.cc b/internal/ceres/dogleg_strategy.cc
index c9369c2..ac8c7d7 100644
--- a/internal/ceres/dogleg_strategy.cc
+++ b/internal/ceres/dogleg_strategy.cc
@@ -100,7 +100,7 @@
     }
     TrustRegionStrategy::Summary summary;
     summary.num_iterations = 0;
-    summary.termination_type = LINEAR_SOLVER_SUCCESS;
+    summary.termination_type = LinearSolverTerminationType::SUCCESS;
     return summary;
   }
 
@@ -137,11 +137,13 @@
   summary.num_iterations = linear_solver_summary.num_iterations;
   summary.termination_type = linear_solver_summary.termination_type;
 
-  if (linear_solver_summary.termination_type == LINEAR_SOLVER_FATAL_ERROR) {
+  if (linear_solver_summary.termination_type ==
+      LinearSolverTerminationType::FATAL_ERROR) {
     return summary;
   }
 
-  if (linear_solver_summary.termination_type != LINEAR_SOLVER_FAILURE) {
+  if (linear_solver_summary.termination_type !=
+      LinearSolverTerminationType::FAILURE) {
     switch (dogleg_type_) {
       // Interpolate the Cauchy point and the Gauss-Newton step.
       case TRADITIONAL_DOGLEG:
@@ -152,7 +154,7 @@
       // Cauchy point and the (Gauss-)Newton step.
       case SUBSPACE_DOGLEG:
         if (!ComputeSubspaceModel(jacobian)) {
-          summary.termination_type = LINEAR_SOLVER_FAILURE;
+          summary.termination_type = LinearSolverTerminationType::FAILURE;
           break;
         }
         ComputeSubspaceDoglegStep(step);
@@ -517,7 +519,7 @@
     const double* residuals) {
   const int n = jacobian->num_cols();
   LinearSolver::Summary linear_solver_summary;
-  linear_solver_summary.termination_type = LINEAR_SOLVER_FAILURE;
+  linear_solver_summary.termination_type = LinearSolverTerminationType::FAILURE;
 
   // The Jacobian matrix is often quite poorly conditioned. Thus it is
   // necessary to add a diagonal matrix at the bottom to prevent the
@@ -530,7 +532,7 @@
   // If the solve fails, the multiplier to the diagonal is increased
   // up to max_mu_ by a factor of mu_increase_factor_ every time. If
   // the linear solver is still not successful, the strategy returns
-  // with LINEAR_SOLVER_FAILURE.
+  // with LinearSolverTerminationType::FAILURE.
   //
   // Next time when a new Gauss-Newton step is requested, the
   // multiplier starts out from the last successful solve.
@@ -581,21 +583,25 @@
       }
     }
 
-    if (linear_solver_summary.termination_type == LINEAR_SOLVER_FATAL_ERROR) {
+    if (linear_solver_summary.termination_type ==
+        LinearSolverTerminationType::FATAL_ERROR) {
       return linear_solver_summary;
     }
 
-    if (linear_solver_summary.termination_type == LINEAR_SOLVER_FAILURE ||
+    if (linear_solver_summary.termination_type ==
+            LinearSolverTerminationType::FAILURE ||
         !IsArrayValid(n, gauss_newton_step_.data())) {
       mu_ *= mu_increase_factor_;
       VLOG(2) << "Increasing mu " << mu_;
-      linear_solver_summary.termination_type = LINEAR_SOLVER_FAILURE;
+      linear_solver_summary.termination_type =
+          LinearSolverTerminationType::FAILURE;
       continue;
     }
     break;
   }
 
-  if (linear_solver_summary.termination_type != LINEAR_SOLVER_FAILURE) {
+  if (linear_solver_summary.termination_type !=
+      LinearSolverTerminationType::FAILURE) {
     // The scaled Gauss-Newton step is D * GN:
     //
     //     - (D^-1 J^T J D^-1)^-1 (D^-1 g)
diff --git a/internal/ceres/dogleg_strategy_test.cc b/internal/ceres/dogleg_strategy_test.cc
index d1ef611..a4b7f08 100644
--- a/internal/ceres/dogleg_strategy_test.cc
+++ b/internal/ceres/dogleg_strategy_test.cc
@@ -145,7 +145,7 @@
   TrustRegionStrategy::Summary summary =
       strategy.ComputeStep(pso, jacobian_.get(), residual_.data(), x_.data());
 
-  EXPECT_NE(summary.termination_type, LINEAR_SOLVER_FAILURE);
+  EXPECT_NE(summary.termination_type, LinearSolverTerminationType::FAILURE);
   EXPECT_LE(x_.norm(), options_.initial_radius * (1.0 + 4.0 * kEpsilon));
 }
 
@@ -163,7 +163,7 @@
   TrustRegionStrategy::Summary summary =
       strategy.ComputeStep(pso, jacobian_.get(), residual_.data(), x_.data());
 
-  EXPECT_NE(summary.termination_type, LINEAR_SOLVER_FAILURE);
+  EXPECT_NE(summary.termination_type, LinearSolverTerminationType::FAILURE);
   EXPECT_LE(x_.norm(), options_.initial_radius * (1.0 + 4.0 * kEpsilon));
 }
 
@@ -181,7 +181,7 @@
   TrustRegionStrategy::Summary summary =
       strategy.ComputeStep(pso, jacobian_.get(), residual_.data(), x_.data());
 
-  EXPECT_NE(summary.termination_type, LINEAR_SOLVER_FAILURE);
+  EXPECT_NE(summary.termination_type, LinearSolverTerminationType::FAILURE);
   EXPECT_NEAR(x_(0), 1.0, kToleranceLoose);
   EXPECT_NEAR(x_(1), 1.0, kToleranceLoose);
   EXPECT_NEAR(x_(2), 1.0, kToleranceLoose);
@@ -239,7 +239,7 @@
   TrustRegionStrategy::Summary summary =
       strategy.ComputeStep(pso, jacobian_.get(), residual_.data(), x_.data());
 
-  EXPECT_NE(summary.termination_type, LINEAR_SOLVER_FAILURE);
+  EXPECT_NE(summary.termination_type, LinearSolverTerminationType::FAILURE);
   EXPECT_NEAR(x_(0), 0.0, kToleranceLoose);
   EXPECT_NEAR(x_(1), 0.0, kToleranceLoose);
   EXPECT_NEAR(x_(2), options_.initial_radius, kToleranceLoose);
@@ -265,7 +265,7 @@
   TrustRegionStrategy::Summary summary =
       strategy.ComputeStep(pso, jacobian_.get(), residual_.data(), x_.data());
 
-  EXPECT_NE(summary.termination_type, LINEAR_SOLVER_FAILURE);
+  EXPECT_NE(summary.termination_type, LinearSolverTerminationType::FAILURE);
   EXPECT_NEAR(x_(0), 0.0, kToleranceLoose);
   EXPECT_NEAR(x_(1), 0.0, kToleranceLoose);
   EXPECT_NEAR(x_(2), 1.0, kToleranceLoose);
diff --git a/internal/ceres/dynamic_sparse_normal_cholesky_solver.cc b/internal/ceres/dynamic_sparse_normal_cholesky_solver.cc
index 61c402b..f31004b 100644
--- a/internal/ceres/dynamic_sparse_normal_cholesky_solver.cc
+++ b/internal/ceres/dynamic_sparse_normal_cholesky_solver.cc
@@ -112,7 +112,7 @@
 
   LinearSolver::Summary summary;
   summary.num_iterations = 0;
-  summary.termination_type = LINEAR_SOLVER_FATAL_ERROR;
+  summary.termination_type = LinearSolverTerminationType::FATAL_ERROR;
   summary.message =
       "SPARSE_NORMAL_CHOLESKY cannot be used with EIGEN_SPARSE "
       "because Ceres was not built with support for "
@@ -137,7 +137,7 @@
 
   LinearSolver::Summary summary;
   summary.num_iterations = 1;
-  summary.termination_type = LINEAR_SOLVER_SUCCESS;
+  summary.termination_type = LinearSolverTerminationType::SUCCESS;
   summary.message = "Success.";
 
   solver.analyzePattern(lhs);
@@ -149,7 +149,7 @@
 
   event_logger.AddEvent("Analyze");
   if (solver.info() != Eigen::Success) {
-    summary.termination_type = LINEAR_SOLVER_FATAL_ERROR;
+    summary.termination_type = LinearSolverTerminationType::FATAL_ERROR;
     summary.message = "Eigen failure. Unable to find symbolic factorization.";
     return summary;
   }
@@ -157,7 +157,7 @@
   solver.factorize(lhs);
   event_logger.AddEvent("Factorize");
   if (solver.info() != Eigen::Success) {
-    summary.termination_type = LINEAR_SOLVER_FAILURE;
+    summary.termination_type = LinearSolverTerminationType::FAILURE;
     summary.message = "Eigen failure. Unable to find numeric factorization.";
     return summary;
   }
@@ -166,7 +166,7 @@
   VectorRef(rhs_and_solution, lhs.cols()) = solver.solve(rhs);
   event_logger.AddEvent("Solve");
   if (solver.info() != Eigen::Success) {
-    summary.termination_type = LINEAR_SOLVER_FAILURE;
+    summary.termination_type = LinearSolverTerminationType::FAILURE;
     summary.message = "Eigen failure. Unable to do triangular solve.";
     return summary;
   }
@@ -181,7 +181,7 @@
 
   LinearSolver::Summary summary;
   summary.num_iterations = 0;
-  summary.termination_type = LINEAR_SOLVER_FATAL_ERROR;
+  summary.termination_type = LinearSolverTerminationType::FATAL_ERROR;
   summary.message =
       "SPARSE_NORMAL_CHOLESKY cannot be used with CX_SPARSE "
       "because Ceres was not built with support for CXSparse. "
@@ -195,7 +195,7 @@
 
   LinearSolver::Summary summary;
   summary.num_iterations = 1;
-  summary.termination_type = LINEAR_SOLVER_SUCCESS;
+  summary.termination_type = LinearSolverTerminationType::SUCCESS;
   summary.message = "Success.";
 
   CXSparse cxsparse;
@@ -216,7 +216,7 @@
   event_logger.AddEvent("NormalEquations");
 
   if (!cxsparse.SolveCholesky(lhs, rhs_and_solution)) {
-    summary.termination_type = LINEAR_SOLVER_FAILURE;
+    summary.termination_type = LinearSolverTerminationType::FAILURE;
     summary.message = "CXSparse::SolveCholesky failed";
   }
   event_logger.AddEvent("Solve");
@@ -234,7 +234,7 @@
 
   LinearSolver::Summary summary;
   summary.num_iterations = 0;
-  summary.termination_type = LINEAR_SOLVER_FATAL_ERROR;
+  summary.termination_type = LinearSolverTerminationType::FATAL_ERROR;
   summary.message =
       "SPARSE_NORMAL_CHOLESKY cannot be used with SUITE_SPARSE "
       "because Ceres was not built with support for SuiteSparse. "
@@ -246,7 +246,7 @@
   EventLogger event_logger(
       "DynamicSparseNormalCholeskySolver::SuiteSparse::Solve");
   LinearSolver::Summary summary;
-  summary.termination_type = LINEAR_SOLVER_SUCCESS;
+  summary.termination_type = LinearSolverTerminationType::SUCCESS;
   summary.num_iterations = 1;
   summary.message = "Success.";
 
@@ -258,12 +258,12 @@
   event_logger.AddEvent("Analysis");
 
   if (factor == nullptr) {
-    summary.termination_type = LINEAR_SOLVER_FATAL_ERROR;
+    summary.termination_type = LinearSolverTerminationType::FATAL_ERROR;
     return summary;
   }
 
   summary.termination_type = ss.Cholesky(&lhs, factor, &summary.message);
-  if (summary.termination_type == LINEAR_SOLVER_SUCCESS) {
+  if (summary.termination_type == LinearSolverTerminationType::SUCCESS) {
     cholmod_dense cholmod_rhs =
         ss.CreateDenseVectorView(rhs_and_solution, num_cols);
     cholmod_dense* solution = ss.Solve(factor, &cholmod_rhs, &summary.message);
@@ -273,7 +273,7 @@
           rhs_and_solution, solution->x, num_cols * sizeof(*rhs_and_solution));
       ss.Free(solution);
     } else {
-      summary.termination_type = LINEAR_SOLVER_FAILURE;
+      summary.termination_type = LinearSolverTerminationType::FAILURE;
     }
   }
 
diff --git a/internal/ceres/dynamic_sparse_normal_cholesky_solver_test.cc b/internal/ceres/dynamic_sparse_normal_cholesky_solver_test.cc
index f2733d8..f3bf650 100644
--- a/internal/ceres/dynamic_sparse_normal_cholesky_solver_test.cc
+++ b/internal/ceres/dynamic_sparse_normal_cholesky_solver_test.cc
@@ -83,7 +83,7 @@
     summary = solver->Solve(
         A_.get(), b_.get(), per_solve_options, actual_solution.data());
 
-    EXPECT_EQ(summary.termination_type, LINEAR_SOLVER_SUCCESS);
+    EXPECT_EQ(summary.termination_type, LinearSolverTerminationType::SUCCESS);
 
     for (int i = 0; i < A_->num_cols(); ++i) {
       EXPECT_NEAR(expected_solution(i), actual_solution(i), 1e-8)
diff --git a/internal/ceres/eigensparse.cc b/internal/ceres/eigensparse.cc
index c1b7cc6..4c68c7a 100644
--- a/internal/ceres/eigensparse.cc
+++ b/internal/ceres/eigensparse.cc
@@ -50,7 +50,7 @@
  public:
   EigenSparseCholeskyTemplate() = default;
   CompressedRowSparseMatrix::StorageType StorageType() const final {
-    return CompressedRowSparseMatrix::LOWER_TRIANGULAR;
+    return CompressedRowSparseMatrix::StorageType::LOWER_TRIANGULAR;
   }
 
   LinearSolverTerminationType Factorize(
@@ -67,7 +67,7 @@
 
       if (solver_.info() != Eigen::Success) {
         *message = "Eigen failure. Unable to find symbolic factorization.";
-        return LINEAR_SOLVER_FATAL_ERROR;
+        return LinearSolverTerminationType::FATAL_ERROR;
       }
 
       analyzed_ = true;
@@ -76,9 +76,9 @@
     solver_.factorize(lhs);
     if (solver_.info() != Eigen::Success) {
       *message = "Eigen failure. Unable to find numeric factorization.";
-      return LINEAR_SOLVER_FAILURE;
+      return LinearSolverTerminationType::FAILURE;
     }
-    return LINEAR_SOLVER_SUCCESS;
+    return LinearSolverTerminationType::SUCCESS;
   }
 
   LinearSolverTerminationType Solve(const double* rhs_ptr,
@@ -100,9 +100,9 @@
 
     if (solver_.info() != Eigen::Success) {
       *message = "Eigen failure. Unable to do triangular solve.";
-      return LINEAR_SOLVER_FAILURE;
+      return LinearSolverTerminationType::FAILURE;
     }
-    return LINEAR_SOLVER_SUCCESS;
+    return LinearSolverTerminationType::SUCCESS;
   }
 
   LinearSolverTerminationType Factorize(CompressedRowSparseMatrix* lhs,
@@ -149,7 +149,7 @@
                             Eigen::Upper,
                             Eigen::NaturalOrdering<int>>;
 
-  if (ordering_type == AMD) {
+  if (ordering_type == OrderingType::AMD) {
     return std::make_unique<EigenSparseCholeskyTemplate<WithAMDOrdering>>();
   } else {
     return std::make_unique<EigenSparseCholeskyTemplate<WithNaturalOrdering>>();
@@ -167,7 +167,7 @@
       Eigen::SimplicialLDLT<Eigen::SparseMatrix<float>,
                             Eigen::Upper,
                             Eigen::NaturalOrdering<int>>;
-  if (ordering_type == AMD) {
+  if (ordering_type == OrderingType::AMD) {
     return std::make_unique<EigenSparseCholeskyTemplate<WithAMDOrdering>>();
   } else {
     return std::make_unique<EigenSparseCholeskyTemplate<WithNaturalOrdering>>();
diff --git a/internal/ceres/inner_product_computer.cc b/internal/ceres/inner_product_computer.cc
index 6660b38..69a68ee 100644
--- a/internal/ceres/inner_product_computer.cc
+++ b/internal/ceres/inner_product_computer.cc
@@ -129,8 +129,10 @@
     const int start_row_block,
     const int end_row_block,
     CompressedRowSparseMatrix::StorageType product_storage_type) {
-  CHECK(product_storage_type == CompressedRowSparseMatrix::LOWER_TRIANGULAR ||
-        product_storage_type == CompressedRowSparseMatrix::UPPER_TRIANGULAR);
+  CHECK(product_storage_type ==
+            CompressedRowSparseMatrix::StorageType::LOWER_TRIANGULAR ||
+        product_storage_type ==
+            CompressedRowSparseMatrix::StorageType::UPPER_TRIANGULAR);
   CHECK_GT(m.num_nonzeros(), 0)
       << "Congratulations, you found a bug in Ceres. Please report it.";
   std::unique_ptr<InnerProductComputer> inner_product_computer(
@@ -156,7 +158,8 @@
     for (int c1 = 0; c1 < row.cells.size(); ++c1) {
       const Cell& cell1 = row.cells[c1];
       int c2_begin, c2_end;
-      if (product_storage_type == CompressedRowSparseMatrix::LOWER_TRIANGULAR) {
+      if (product_storage_type ==
+          CompressedRowSparseMatrix::StorageType::LOWER_TRIANGULAR) {
         c2_begin = 0;
         c2_end = c1 + 1;
       } else {
@@ -301,7 +304,8 @@
                           rows[bs->cols[cell1.block_id].position];
 
       int c2_begin, c2_end;
-      if (storage_type == CompressedRowSparseMatrix::LOWER_TRIANGULAR) {
+      if (storage_type ==
+          CompressedRowSparseMatrix::StorageType::LOWER_TRIANGULAR) {
         c2_begin = 0;
         c2_end = c1 + 1;
       } else {
diff --git a/internal/ceres/inner_product_computer_test.cc b/internal/ceres/inner_product_computer_test.cc
index b01ad8e..18a43d3 100644
--- a/internal/ceres/inner_product_computer_test.cc
+++ b/internal/ceres/inner_product_computer_test.cc
@@ -62,7 +62,7 @@
     EXPECT_EQ(actual_inner_product.rows(), expected_inner_product.rows());   \
     Matrix expected_t, actual_t;                                             \
     if (actual_product_crsm->storage_type() ==                               \
-        CompressedRowSparseMatrix::LOWER_TRIANGULAR) {                       \
+        CompressedRowSparseMatrix::StorageType::LOWER_TRIANGULAR) {          \
       expected_t = expected_inner_product.triangularView<Eigen::Upper>();    \
       actual_t = actual_inner_product.triangularView<Eigen::Upper>();        \
     } else {                                                                 \
@@ -128,10 +128,12 @@
         std::unique_ptr<InnerProductComputer> inner_product_computer;
 
         inner_product_computer = InnerProductComputer::Create(
-            *random_matrix, CompressedRowSparseMatrix::LOWER_TRIANGULAR);
+            *random_matrix,
+            CompressedRowSparseMatrix::StorageType::LOWER_TRIANGULAR);
         COMPUTE_AND_COMPARE;
         inner_product_computer = InnerProductComputer::Create(
-            *random_matrix, CompressedRowSparseMatrix::UPPER_TRIANGULAR);
+            *random_matrix,
+            CompressedRowSparseMatrix::StorageType::UPPER_TRIANGULAR);
         COMPUTE_AND_COMPARE;
       }
     }
@@ -205,13 +207,13 @@
             *random_matrix,
             start_row_block,
             end_row_block,
-            CompressedRowSparseMatrix::LOWER_TRIANGULAR);
+            CompressedRowSparseMatrix::StorageType::LOWER_TRIANGULAR);
         COMPUTE_AND_COMPARE;
         inner_product_computer = InnerProductComputer::Create(
             *random_matrix,
             start_row_block,
             end_row_block,
-            CompressedRowSparseMatrix::UPPER_TRIANGULAR);
+            CompressedRowSparseMatrix::StorageType::UPPER_TRIANGULAR);
         COMPUTE_AND_COMPARE;
       }
     }
diff --git a/internal/ceres/iterative_refiner_test.cc b/internal/ceres/iterative_refiner_test.cc
index 3298a44..5718f14 100644
--- a/internal/ceres/iterative_refiner_test.cc
+++ b/internal/ceres/iterative_refiner_test.cc
@@ -98,15 +98,16 @@
     VectorRef solution(solution_ptr, num_cols);
     ConstVectorRef rhs(rhs_ptr, num_cols);
     solution = lhs_.llt().solve(rhs.cast<Scalar>()).template cast<double>();
-    return LINEAR_SOLVER_SUCCESS;
+    return LinearSolverTerminationType::SUCCESS;
   }
 
   // The following methods are not needed for tests in this file.
   CompressedRowSparseMatrix::StorageType StorageType() const final
-      DO_NOT_CALL_WITH_RETURN(CompressedRowSparseMatrix::UPPER_TRIANGULAR);
+      DO_NOT_CALL_WITH_RETURN(
+          CompressedRowSparseMatrix::StorageType::UPPER_TRIANGULAR);
   LinearSolverTerminationType Factorize(CompressedRowSparseMatrix* lhs,
                                         std::string* message) final
-      DO_NOT_CALL_WITH_RETURN(LINEAR_SOLVER_FAILURE);
+      DO_NOT_CALL_WITH_RETURN(LinearSolverTerminationType::FAILURE);
 
  private:
   Eigen::Matrix<Scalar, Eigen::Dynamic, Eigen::Dynamic> lhs_;
diff --git a/internal/ceres/iterative_schur_complement_solver.cc b/internal/ceres/iterative_schur_complement_solver.cc
index cc4abd2..9cacf0b 100644
--- a/internal/ceres/iterative_schur_complement_solver.cc
+++ b/internal/ceres/iterative_schur_complement_solver.cc
@@ -85,7 +85,7 @@
     VLOG(2) << "No parameter blocks left in the schur complement.";
     LinearSolver::Summary summary;
     summary.num_iterations = 0;
-    summary.termination_type = LINEAR_SOLVER_SUCCESS;
+    summary.termination_type = LinearSolverTerminationType::SUCCESS;
     schur_complement_->BackSubstitute(nullptr, x);
     return summary;
   }
@@ -108,7 +108,7 @@
     if (!preconditioner_->Update(*A, per_solve_options.D)) {
       LinearSolver::Summary summary;
       summary.num_iterations = 0;
-      summary.termination_type = LINEAR_SOLVER_FAILURE;
+      summary.termination_type = LinearSolverTerminationType::FAILURE;
       summary.message = "Preconditioner update failed.";
       return summary;
     }
@@ -122,8 +122,8 @@
                       schur_complement_->rhs().data(),
                       cg_per_solve_options,
                       reduced_linear_system_solution_.data());
-  if (summary.termination_type != LINEAR_SOLVER_FAILURE &&
-      summary.termination_type != LINEAR_SOLVER_FATAL_ERROR) {
+  if (summary.termination_type != LinearSolverTerminationType::FAILURE &&
+      summary.termination_type != LinearSolverTerminationType::FATAL_ERROR) {
     schur_complement_->BackSubstitute(reduced_linear_system_solution_.data(),
                                       x);
   }
diff --git a/internal/ceres/levenberg_marquardt_strategy.cc b/internal/ceres/levenberg_marquardt_strategy.cc
index 88e5ea0..b32e131 100644
--- a/internal/ceres/levenberg_marquardt_strategy.cc
+++ b/internal/ceres/levenberg_marquardt_strategy.cc
@@ -107,15 +107,18 @@
   LinearSolver::Summary linear_solver_summary =
       linear_solver_->Solve(jacobian, residuals, solve_options, step);
 
-  if (linear_solver_summary.termination_type == LINEAR_SOLVER_FATAL_ERROR) {
+  if (linear_solver_summary.termination_type ==
+      LinearSolverTerminationType::FATAL_ERROR) {
     LOG(WARNING) << "Linear solver fatal error: "
                  << linear_solver_summary.message;
-  } else if (linear_solver_summary.termination_type == LINEAR_SOLVER_FAILURE) {
+  } else if (linear_solver_summary.termination_type ==
+             LinearSolverTerminationType::FAILURE) {
     LOG(WARNING) << "Linear solver failure. Failed to compute a step: "
                  << linear_solver_summary.message;
   } else if (!IsArrayValid(num_parameters, step)) {
     LOG(WARNING) << "Linear solver failure. Failed to compute a finite step.";
-    linear_solver_summary.termination_type = LINEAR_SOLVER_FAILURE;
+    linear_solver_summary.termination_type =
+        LinearSolverTerminationType::FAILURE;
   } else {
     VectorRef(step, num_parameters) *= -1.0;
   }
diff --git a/internal/ceres/levenberg_marquardt_strategy_test.cc b/internal/ceres/levenberg_marquardt_strategy_test.cc
index d79323f..1ac4c07 100644
--- a/internal/ceres/levenberg_marquardt_strategy_test.cc
+++ b/internal/ceres/levenberg_marquardt_strategy_test.cc
@@ -159,7 +159,7 @@
 
     TrustRegionStrategy::Summary summary =
         lms.ComputeStep(pso, &dsm, &residual, x);
-    EXPECT_EQ(summary.termination_type, LINEAR_SOLVER_FAILURE);
+    EXPECT_EQ(summary.termination_type, LinearSolverTerminationType::FAILURE);
   }
 }
 
diff --git a/internal/ceres/linear_solver.h b/internal/ceres/linear_solver.h
index b07f53d..57c1a79 100644
--- a/internal/ceres/linear_solver.h
+++ b/internal/ceres/linear_solver.h
@@ -54,34 +54,57 @@
 
 namespace ceres::internal {
 
-enum LinearSolverTerminationType {
+enum class LinearSolverTerminationType {
   // Termination criterion was met.
-  LINEAR_SOLVER_SUCCESS,
+  SUCCESS,
 
   // Solver ran for max_num_iterations and terminated before the
   // termination tolerance could be satisfied.
-  LINEAR_SOLVER_NO_CONVERGENCE,
+  NO_CONVERGENCE,
 
   // Solver was terminated due to numerical problems, generally due to
   // the linear system being poorly conditioned.
-  LINEAR_SOLVER_FAILURE,
+  FAILURE,
 
   // Solver failed with a fatal error that cannot be recovered from,
   // e.g. CHOLMOD ran out of memory when computing the symbolic or
   // numeric factorization or an underlying library was called with
   // the wrong arguments.
-  LINEAR_SOLVER_FATAL_ERROR
+  FATAL_ERROR
 };
+inline std::ostream& operator<<(std::ostream& s,
+                                LinearSolverTerminationType type) {
+  switch (type) {
+    case LinearSolverTerminationType::SUCCESS:
+      s << "LINEAR_SOLVER_SUCCESS";
+      break;
+    case LinearSolverTerminationType::NO_CONVERGENCE:
+      s << "LINEAR_SOLVER_NO_CONVERGENCE";
+      break;
+    case LinearSolverTerminationType::FAILURE:
+      s << "LINEAR_SOLVER_FAILURE";
+      break;
+    case LinearSolverTerminationType::FATAL_ERROR:
+      s << "LINEAR_SOLVER_FATAL_ERROR";
+      break;
+    default:
+      s << "UNKNOWN LinearSolverTerminationType";
+  }
+  return s;
+}
 
 // This enum controls the fill-reducing ordering a sparse linear
 // algebra library should use before computing a sparse factorization
 // (usually Cholesky).
-enum OrderingType {
+//
+// TODO(sameeragarwal): Add support for nested dissection
+enum class OrderingType {
   NATURAL,  // Do not re-order the matrix. This is useful when the
             // matrix has been ordered using a fill-reducing ordering
             // already.
-  AMD       // Use the Approximate Minimum Degree algorithm to re-order
-            // the matrix.
+
+  AMD,  // Use the Approximate Minimum Degree algorithm to re-order
+        // the matrix.
 };
 
 class LinearOperator;
@@ -260,7 +283,8 @@
   struct Summary {
     double residual_norm = -1.0;
     int num_iterations = -1;
-    LinearSolverTerminationType termination_type = LINEAR_SOLVER_FAILURE;
+    LinearSolverTerminationType termination_type =
+        LinearSolverTerminationType::FAILURE;
     std::string message;
   };
 
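Editorial note (not part of the patch): the practical effect of the enum class conversion at call sites is two-fold. Enumerators must now be qualified with the enum name, and printing goes through the operator<< added above, which deliberately emits the old LINEAR_SOLVER_* spellings. The following is a minimal illustrative sketch of both patterns; it assumes linear_solver.h is on the include path and that the Summary object comes from some LinearSolver::Solve() call.

// Illustrative only; ReportTermination is a hypothetical helper, not code
// introduced by this change.
#include <iostream>

#include "ceres/linear_solver.h"

namespace ceres::internal {

void ReportTermination(const LinearSolver::Summary& summary) {
  // Enumerators are scoped now: LinearSolverTerminationType::SUCCESS rather
  // than the old unscoped LINEAR_SOLVER_SUCCESS.
  if (summary.termination_type != LinearSolverTerminationType::SUCCESS) {
    // The new operator<< prints the legacy names, e.g.
    // "LINEAR_SOLVER_FAILURE", so the streamed output looks unchanged.
    std::cerr << "Linear solve did not succeed: " << summary.termination_type
              << " (" << summary.message << ")\n";
  }
}

}  // namespace ceres::internal

Keeping the legacy spellings in the printer is presumably intentional, so that existing log output and test failure messages retain their familiar names.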
diff --git a/internal/ceres/schur_complement_solver.cc b/internal/ceres/schur_complement_solver.cc
index 693b519..28e6a5d 100644
--- a/internal/ceres/schur_complement_solver.cc
+++ b/internal/ceres/schur_complement_solver.cc
@@ -168,7 +168,7 @@
       SolveReducedLinearSystem(per_solve_options, reduced_solution);
   event_logger.AddEvent("ReducedSolve");
 
-  if (summary.termination_type == LINEAR_SOLVER_SUCCESS) {
+  if (summary.termination_type == LinearSolverTerminationType::SUCCESS) {
     eliminator_->BackSubstitute(
         BlockSparseMatrixData(*A), b, per_solve_options.D, reduced_solution, x);
     event_logger.AddEvent("BackSubstitute");
@@ -206,7 +206,7 @@
     const LinearSolver::PerSolveOptions& per_solve_options, double* solution) {
   LinearSolver::Summary summary;
   summary.num_iterations = 0;
-  summary.termination_type = LINEAR_SOLVER_SUCCESS;
+  summary.termination_type = LinearSolverTerminationType::SUCCESS;
   summary.message = "Success.";
 
   auto* m = down_cast<BlockRandomAccessDenseMatrix*>(mutable_lhs());
@@ -315,7 +315,7 @@
 
   LinearSolver::Summary summary;
   summary.num_iterations = 0;
-  summary.termination_type = LINEAR_SOLVER_SUCCESS;
+  summary.termination_type = LinearSolverTerminationType::SUCCESS;
   summary.message = "Success.";
 
   const TripletSparseMatrix* tsm =
@@ -327,12 +327,15 @@
   std::unique_ptr<CompressedRowSparseMatrix> lhs;
   const CompressedRowSparseMatrix::StorageType storage_type =
       sparse_cholesky_->StorageType();
-  if (storage_type == CompressedRowSparseMatrix::UPPER_TRIANGULAR) {
+  if (storage_type ==
+      CompressedRowSparseMatrix::StorageType::UPPER_TRIANGULAR) {
     lhs = CompressedRowSparseMatrix::FromTripletSparseMatrix(*tsm);
-    lhs->set_storage_type(CompressedRowSparseMatrix::UPPER_TRIANGULAR);
+    lhs->set_storage_type(
+        CompressedRowSparseMatrix::StorageType::UPPER_TRIANGULAR);
   } else {
     lhs = CompressedRowSparseMatrix::FromTripletSparseMatrixTransposed(*tsm);
-    lhs->set_storage_type(CompressedRowSparseMatrix::LOWER_TRIANGULAR);
+    lhs->set_storage_type(
+        CompressedRowSparseMatrix::StorageType::LOWER_TRIANGULAR);
   }
 
   *lhs->mutable_col_blocks() = blocks_;
@@ -354,7 +357,7 @@
   if (num_rows == 0) {
     LinearSolver::Summary summary;
     summary.num_iterations = 0;
-    summary.termination_type = LINEAR_SOLVER_SUCCESS;
+    summary.termination_type = LinearSolverTerminationType::SUCCESS;
     summary.message = "Success.";
     return summary;
   }
diff --git a/internal/ceres/schur_complement_solver_test.cc b/internal/ceres/schur_complement_solver_test.cc
index 2df8267..697d54a 100644
--- a/internal/ceres/schur_complement_solver_test.cc
+++ b/internal/ceres/schur_complement_solver_test.cc
@@ -122,7 +122,7 @@
     }
 
     summary = solver->Solve(A.get(), b.get(), per_solve_options, x.data());
-    EXPECT_EQ(summary.termination_type, LINEAR_SOLVER_SUCCESS);
+    EXPECT_EQ(summary.termination_type, LinearSolverTerminationType::SUCCESS);
 
     if (regularization) {
       ASSERT_NEAR((sol_d - x).norm() / num_cols, 0, 1e-10)
diff --git a/internal/ceres/sparse_cholesky.cc b/internal/ceres/sparse_cholesky.cc
index 9080b34..ac7c9b2 100644
--- a/internal/ceres/sparse_cholesky.cc
+++ b/internal/ceres/sparse_cholesky.cc
@@ -44,7 +44,8 @@
 
 std::unique_ptr<SparseCholesky> SparseCholesky::Create(
     const LinearSolver::Options& options) {
-  const OrderingType ordering_type = options.use_postordering ? AMD : NATURAL;
+  const OrderingType ordering_type =
+      options.use_postordering ? OrderingType::AMD : OrderingType::NATURAL;
   std::unique_ptr<SparseCholesky> sparse_cholesky;
 
   switch (options.sparse_linear_algebra_library_type) {
@@ -122,7 +123,7 @@
     double* solution,
     std::string* message) {
   LinearSolverTerminationType termination_type = Factorize(lhs, message);
-  if (termination_type == LINEAR_SOLVER_SUCCESS) {
+  if (termination_type == LinearSolverTerminationType::SUCCESS) {
     termination_type = Solve(rhs, solution, message);
   }
   return termination_type;
@@ -152,12 +153,12 @@
                                                          std::string* message) {
   CHECK(lhs_ != nullptr);
   auto termination_type = sparse_cholesky_->Solve(rhs, solution, message);
-  if (termination_type != LINEAR_SOLVER_SUCCESS) {
+  if (termination_type != LinearSolverTerminationType::SUCCESS) {
     return termination_type;
   }
 
   iterative_refiner_->Refine(*lhs_, rhs, sparse_cholesky_.get(), solution);
-  return LINEAR_SOLVER_SUCCESS;
+  return LinearSolverTerminationType::SUCCESS;
 }
 
 }  // namespace ceres::internal
diff --git a/internal/ceres/sparse_cholesky.h b/internal/ceres/sparse_cholesky.h
index 831beee..f4f8996 100644
--- a/internal/ceres/sparse_cholesky.h
+++ b/internal/ceres/sparse_cholesky.h
@@ -62,11 +62,12 @@
 //
 //  CompressedRowSparseMatrix lhs = ...;
 //  std::string message;
-//  CHECK_EQ(sparse_cholesky->Factorize(&lhs, &message), LINEAR_SOLVER_SUCCESS);
+//  CHECK_EQ(sparse_cholesky->Factorize(&lhs, &message),
+//           LinearSolverTerminationType::SUCCESS);
 //  Vector rhs = ...;
 //  Vector solution = ...;
 //  CHECK_EQ(sparse_cholesky->Solve(rhs.data(), solution.data(), &message),
-//           LINEAR_SOLVER_SUCCESS);
+//           LinearSolverTerminationType::SUCCESS);
 
 class CERES_NO_EXPORT SparseCholesky {
  public:
@@ -104,7 +105,7 @@
 
   // Convenience method which combines a call to Factorize and
   // Solve. Solve is only called if Factorize returns
-  // LINEAR_SOLVER_SUCCESS.
+  // LinearSolverTerminationType::SUCCESS.
   LinearSolverTerminationType FactorAndSolve(CompressedRowSparseMatrix* lhs,
                                              const double* rhs,
                                              double* solution,
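The header comment above sketches the intended Factorize/Solve sequence. Spelled out as a compilable snippet under the new scoped enum, it would look roughly like the following editorial sketch: the construction of lhs, rhs and solution is assumed, the include list is indicative, and CHECK_EQ is glog's macro, which can stream the enum on failure thanks to the operator<< added in linear_solver.h.

// Illustrative only; FactorizeThenSolve is a hypothetical helper.
#include <string>

#include "ceres/compressed_row_sparse_matrix.h"
#include "ceres/internal/eigen.h"
#include "ceres/linear_solver.h"
#include "ceres/sparse_cholesky.h"
#include "glog/logging.h"

namespace ceres::internal {

void FactorizeThenSolve(SparseCholesky* sparse_cholesky,
                        CompressedRowSparseMatrix* lhs,
                        const Vector& rhs,
                        Vector* solution) {
  std::string message;
  // Both calls now return scoped enumerators.
  CHECK_EQ(sparse_cholesky->Factorize(lhs, &message),
           LinearSolverTerminationType::SUCCESS)
      << message;
  CHECK_EQ(sparse_cholesky->Solve(rhs.data(), solution->data(), &message),
           LinearSolverTerminationType::SUCCESS)
      << message;
  // Equivalently, FactorAndSolve() performs both steps and only calls Solve
  // if Factorize returns LinearSolverTerminationType::SUCCESS.
}

}  // namespace ceres::internal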
diff --git a/internal/ceres/sparse_cholesky_test.cc b/internal/ceres/sparse_cholesky_test.cc
index 00480f4..c9c8363 100644
--- a/internal/ceres/sparse_cholesky_test.cc
+++ b/internal/ceres/sparse_cholesky_test.cc
@@ -81,7 +81,8 @@
                                     Vector* solution) {
   Matrix eigen_lhs;
   lhs.ToDenseMatrix(&eigen_lhs);
-  if (lhs.storage_type() == CompressedRowSparseMatrix::UPPER_TRIANGULAR) {
+  if (lhs.storage_type() ==
+      CompressedRowSparseMatrix::StorageType::UPPER_TRIANGULAR) {
     Matrix full_lhs = eigen_lhs.selfadjointView<Eigen::Upper>();
     Eigen::LLT<Matrix, Eigen::Upper> llt =
         eigen_lhs.selfadjointView<Eigen::Upper>().llt();
@@ -113,7 +114,8 @@
   LinearSolver::Options sparse_cholesky_options;
   sparse_cholesky_options.sparse_linear_algebra_library_type =
       sparse_linear_algebra_library_type;
-  sparse_cholesky_options.use_postordering = (ordering_type == AMD);
+  sparse_cholesky_options.use_postordering =
+      (ordering_type == OrderingType::AMD);
   auto sparse_cholesky = SparseCholesky::Create(sparse_cholesky_options);
   const CompressedRowSparseMatrix::StorageType storage_type =
       sparse_cholesky->StorageType();
@@ -137,7 +139,7 @@
   std::string message;
   EXPECT_EQ(
       sparse_cholesky->FactorAndSolve(lhs, rhs.data(), actual.data(), &message),
-      LINEAR_SOLVER_SUCCESS);
+      LinearSolverTerminationType::SUCCESS);
   Matrix eigen_lhs;
   lhs->ToDenseMatrix(&eigen_lhs);
   EXPECT_NEAR((actual - expected).norm() / actual.norm(),
@@ -154,7 +156,8 @@
   Param param = info.param;
   std::stringstream ss;
   ss << SparseLinearAlgebraLibraryTypeToString(::testing::get<0>(param)) << "_"
-     << (::testing::get<1>(param) == AMD ? "AMD" : "NATURAL") << "_"
+     << (::testing::get<1>(param) == OrderingType::AMD ? "AMD" : "NATURAL")
+     << "_"
      << (::testing::get<2>(param) ? "UseBlockStructure" : "NoBlockStructure");
   return ss.str();
 }
@@ -190,21 +193,25 @@
 namespace {
 
 #ifndef CERES_NO_SUITESPARSE
-INSTANTIATE_TEST_SUITE_P(SuiteSparseCholesky,
-                         SparseCholeskyTest,
-                         ::testing::Combine(::testing::Values(SUITE_SPARSE),
-                                            ::testing::Values(AMD, NATURAL),
-                                            ::testing::Values(true, false)),
-                         ParamInfoToString);
+INSTANTIATE_TEST_SUITE_P(
+    SuiteSparseCholesky,
+    SparseCholeskyTest,
+    ::testing::Combine(::testing::Values(SUITE_SPARSE),
+                       ::testing::Values(OrderingType::AMD,
+                                         OrderingType::NATURAL),
+                       ::testing::Values(true, false)),
+    ParamInfoToString);
 #endif
 
 #ifndef CERES_NO_CXSPARSE
-INSTANTIATE_TEST_SUITE_P(CXSparseCholesky,
-                         SparseCholeskyTest,
-                         ::testing::Combine(::testing::Values(CX_SPARSE),
-                                            ::testing::Values(AMD, NATURAL),
-                                            ::testing::Values(true, false)),
-                         ParamInfoToString);
+INSTANTIATE_TEST_SUITE_P(
+    CXSparseCholesky,
+    SparseCholeskyTest,
+    ::testing::Combine(::testing::Values(CX_SPARSE),
+                       ::testing::Values(OrderingType::AMD,
+                                         OrderingType::NATURAL),
+                       ::testing::Values(true, false)),
+    ParamInfoToString);
 #endif
 
 #ifndef CERES_NO_ACCELERATE_SPARSE
@@ -212,7 +219,8 @@
     AccelerateSparseCholesky,
     SparseCholeskyTest,
     ::testing::Combine(::testing::Values(ACCELERATE_SPARSE),
-                       ::testing::Values(AMD, NATURAL),
+                       ::testing::Values(OrderingType::AMD,
+                                         OrderingType::NATURAL),
                        ::testing::Values(true, false)),
     ParamInfoToString);
 
@@ -220,25 +228,30 @@
     AccelerateSparseCholeskySingle,
     SparseCholeskyTest,
     ::testing::Combine(::testing::Values(ACCELERATE_SPARSE),
-                       ::testing::Values(AMD, NATURAL),
+                       ::testing::Values(OrderingType::AMD,
+                                         OrderingType::NATURAL),
                        ::testing::Values(true, false)),
     ParamInfoToString);
 #endif
 
 #ifdef CERES_USE_EIGEN_SPARSE
-INSTANTIATE_TEST_SUITE_P(EigenSparseCholesky,
-                         SparseCholeskyTest,
-                         ::testing::Combine(::testing::Values(EIGEN_SPARSE),
-                                            ::testing::Values(AMD, NATURAL),
-                                            ::testing::Values(true, false)),
-                         ParamInfoToString);
+INSTANTIATE_TEST_SUITE_P(
+    EigenSparseCholesky,
+    SparseCholeskyTest,
+    ::testing::Combine(::testing::Values(EIGEN_SPARSE),
+                       ::testing::Values(OrderingType::AMD,
+                                         OrderingType::NATURAL),
+                       ::testing::Values(true, false)),
+    ParamInfoToString);
 
-INSTANTIATE_TEST_SUITE_P(EigenSparseCholeskySingle,
-                         SparseCholeskyTest,
-                         ::testing::Combine(::testing::Values(EIGEN_SPARSE),
-                                            ::testing::Values(AMD, NATURAL),
-                                            ::testing::Values(true, false)),
-                         ParamInfoToString);
+INSTANTIATE_TEST_SUITE_P(
+    EigenSparseCholeskySingle,
+    SparseCholeskyTest,
+    ::testing::Combine(::testing::Values(EIGEN_SPARSE),
+                       ::testing::Values(OrderingType::AMD,
+                                         OrderingType::NATURAL),
+                       ::testing::Values(true, false)),
+    ParamInfoToString);
 #endif
 
 class MockSparseCholesky : public SparseCholesky {
@@ -271,14 +284,15 @@
   auto* mock_iterative_refiner = new MockIterativeRefiner;
   EXPECT_CALL(*mock_sparse_cholesky, StorageType())
       .Times(1)
-      .WillRepeatedly(Return(CompressedRowSparseMatrix::UPPER_TRIANGULAR));
+      .WillRepeatedly(
+          Return(CompressedRowSparseMatrix::StorageType::UPPER_TRIANGULAR));
   EXPECT_CALL(*mock_iterative_refiner, Refine(_, _, _, _)).Times(0);
   std::unique_ptr<SparseCholesky> sparse_cholesky(mock_sparse_cholesky);
   std::unique_ptr<IterativeRefiner> iterative_refiner(mock_iterative_refiner);
   RefinedSparseCholesky refined_sparse_cholesky(std::move(sparse_cholesky),
                                                 std::move(iterative_refiner));
   EXPECT_EQ(refined_sparse_cholesky.StorageType(),
-            CompressedRowSparseMatrix::UPPER_TRIANGULAR);
+            CompressedRowSparseMatrix::StorageType::UPPER_TRIANGULAR);
 };
 
 TEST(RefinedSparseCholesky, Factorize) {
@@ -286,7 +300,7 @@
   auto* mock_iterative_refiner = new MockIterativeRefiner;
   EXPECT_CALL(*mock_sparse_cholesky, Factorize(_, _))
       .Times(1)
-      .WillRepeatedly(Return(LINEAR_SOLVER_SUCCESS));
+      .WillRepeatedly(Return(LinearSolverTerminationType::SUCCESS));
   EXPECT_CALL(*mock_iterative_refiner, Refine(_, _, _, _)).Times(0);
   std::unique_ptr<SparseCholesky> sparse_cholesky(mock_sparse_cholesky);
   std::unique_ptr<IterativeRefiner> iterative_refiner(mock_iterative_refiner);
@@ -295,7 +309,7 @@
   CompressedRowSparseMatrix m(1, 1, 1);
   std::string message;
   EXPECT_EQ(refined_sparse_cholesky.Factorize(&m, &message),
-            LINEAR_SOLVER_SUCCESS);
+            LinearSolverTerminationType::SUCCESS);
 };
 
 TEST(RefinedSparseCholesky, FactorAndSolveWithUnsuccessfulFactorization) {
@@ -303,7 +317,7 @@
   auto* mock_iterative_refiner = new MockIterativeRefiner;
   EXPECT_CALL(*mock_sparse_cholesky, Factorize(_, _))
       .Times(1)
-      .WillRepeatedly(Return(LINEAR_SOLVER_FAILURE));
+      .WillRepeatedly(Return(LinearSolverTerminationType::FAILURE));
   EXPECT_CALL(*mock_sparse_cholesky, Solve(_, _, _)).Times(0);
   EXPECT_CALL(*mock_iterative_refiner, Refine(_, _, _, _)).Times(0);
   std::unique_ptr<SparseCholesky> sparse_cholesky(mock_sparse_cholesky);
@@ -316,7 +330,7 @@
   double solution;
   EXPECT_EQ(
       refined_sparse_cholesky.FactorAndSolve(&m, &rhs, &solution, &message),
-      LINEAR_SOLVER_FAILURE);
+      LinearSolverTerminationType::FAILURE);
 };
 
 TEST(RefinedSparseCholesky, FactorAndSolveWithSuccess) {
@@ -325,10 +339,10 @@
       new MockIterativeRefiner);
   EXPECT_CALL(*mock_sparse_cholesky, Factorize(_, _))
       .Times(1)
-      .WillRepeatedly(Return(LINEAR_SOLVER_SUCCESS));
+      .WillRepeatedly(Return(LinearSolverTerminationType::SUCCESS));
   EXPECT_CALL(*mock_sparse_cholesky, Solve(_, _, _))
       .Times(1)
-      .WillRepeatedly(Return(LINEAR_SOLVER_SUCCESS));
+      .WillRepeatedly(Return(LinearSolverTerminationType::SUCCESS));
   EXPECT_CALL(*mock_iterative_refiner, Refine(_, _, _, _)).Times(1);
 
   std::unique_ptr<SparseCholesky> sparse_cholesky(mock_sparse_cholesky);
@@ -342,7 +356,7 @@
   double solution;
   EXPECT_EQ(
       refined_sparse_cholesky.FactorAndSolve(&m, &rhs, &solution, &message),
-      LINEAR_SOLVER_SUCCESS);
+      LinearSolverTerminationType::SUCCESS);
 };
 
 }  // namespace
diff --git a/internal/ceres/sparse_normal_cholesky_solver.cc b/internal/ceres/sparse_normal_cholesky_solver.cc
index b5030c7..949991b 100644
--- a/internal/ceres/sparse_normal_cholesky_solver.cc
+++ b/internal/ceres/sparse_normal_cholesky_solver.cc
@@ -63,7 +63,7 @@
   EventLogger event_logger("SparseNormalCholeskySolver::Solve");
   LinearSolver::Summary summary;
   summary.num_iterations = 1;
-  summary.termination_type = LINEAR_SOLVER_SUCCESS;
+  summary.termination_type = LinearSolverTerminationType::SUCCESS;
   summary.message = "Success.";
 
   const int num_cols = A->num_cols();
diff --git a/internal/ceres/sparse_normal_cholesky_solver_test.cc b/internal/ceres/sparse_normal_cholesky_solver_test.cc
index eaf2dbb..6b3316c 100644
--- a/internal/ceres/sparse_normal_cholesky_solver_test.cc
+++ b/internal/ceres/sparse_normal_cholesky_solver_test.cc
@@ -86,7 +86,7 @@
     summary = solver->Solve(
         A_.get(), b_.get(), per_solve_options, actual_solution.data());
 
-    EXPECT_EQ(summary.termination_type, LINEAR_SOLVER_SUCCESS);
+    EXPECT_EQ(summary.termination_type, LinearSolverTerminationType::SUCCESS);
 
     for (int i = 0; i < A_->num_cols(); ++i) {
       EXPECT_NEAR(expected_solution(i), actual_solution(i), 1e-8)
diff --git a/internal/ceres/subset_preconditioner.cc b/internal/ceres/subset_preconditioner.cc
index b9202db..c804274 100644
--- a/internal/ceres/subset_preconditioner.cc
+++ b/internal/ceres/subset_preconditioner.cc
@@ -105,7 +105,7 @@
   const LinearSolverTerminationType termination_type =
       sparse_cholesky_->Factorize(inner_product_computer_->mutable_result(),
                                   &message);
-  if (termination_type != LINEAR_SOLVER_SUCCESS) {
+  if (termination_type != LinearSolverTerminationType::SUCCESS) {
     LOG(ERROR) << "Preconditioner factorization failed: " << message;
     return false;
   }
diff --git a/internal/ceres/subset_preconditioner_test.cc b/internal/ceres/subset_preconditioner_test.cc
index bc517e7..0596bc7 100644
--- a/internal/ceres/subset_preconditioner_test.cc
+++ b/internal/ceres/subset_preconditioner_test.cc
@@ -67,7 +67,8 @@
                              Vector* solution) {
   Matrix dense_triangular_lhs;
   lhs.ToDenseMatrix(&dense_triangular_lhs);
-  if (lhs.storage_type() == CompressedRowSparseMatrix::UPPER_TRIANGULAR) {
+  if (lhs.storage_type() ==
+      CompressedRowSparseMatrix::StorageType::UPPER_TRIANGULAR) {
     Matrix full_lhs = dense_triangular_lhs.selfadjointView<Eigen::Upper>();
     return SolveLinearSystemUsingEigen<Eigen::Upper>(full_lhs, rhs, solution);
   }
@@ -120,7 +121,7 @@
     // either case the preconditioner matrix is b_' b + D'D.
     b_->AppendRows(*block_diagonal_);
     inner_product_computer_ = InnerProductComputer::Create(
-        *b_, CompressedRowSparseMatrix::UPPER_TRIANGULAR);
+        *b_, CompressedRowSparseMatrix::StorageType::UPPER_TRIANGULAR);
     inner_product_computer_->Compute();
   }
 
diff --git a/internal/ceres/suitesparse.cc b/internal/ceres/suitesparse.cc
index 4eba49e..7dffe1c 100644
--- a/internal/ceres/suitesparse.cc
+++ b/internal/ceres/suitesparse.cc
@@ -102,9 +102,11 @@
   m.x = reinterpret_cast<void*>(A->mutable_values());
   m.z = nullptr;
 
-  if (A->storage_type() == CompressedRowSparseMatrix::LOWER_TRIANGULAR) {
+  if (A->storage_type() ==
+      CompressedRowSparseMatrix::StorageType::LOWER_TRIANGULAR) {
     m.stype = 1;
-  } else if (A->storage_type() == CompressedRowSparseMatrix::UPPER_TRIANGULAR) {
+  } else if (A->storage_type() ==
+             CompressedRowSparseMatrix::StorageType::UPPER_TRIANGULAR) {
     m.stype = -1;
   } else {
     m.stype = 0;
@@ -281,43 +283,43 @@
   switch (cc_.status) {
     case CHOLMOD_NOT_INSTALLED:
       *message = "CHOLMOD failure: Method not installed.";
-      return LINEAR_SOLVER_FATAL_ERROR;
+      return LinearSolverTerminationType::FATAL_ERROR;
     case CHOLMOD_OUT_OF_MEMORY:
       *message = "CHOLMOD failure: Out of memory.";
-      return LINEAR_SOLVER_FATAL_ERROR;
+      return LinearSolverTerminationType::FATAL_ERROR;
     case CHOLMOD_TOO_LARGE:
       *message = "CHOLMOD failure: Integer overflow occurred.";
-      return LINEAR_SOLVER_FATAL_ERROR;
+      return LinearSolverTerminationType::FATAL_ERROR;
     case CHOLMOD_INVALID:
       *message = "CHOLMOD failure: Invalid input.";
-      return LINEAR_SOLVER_FATAL_ERROR;
+      return LinearSolverTerminationType::FATAL_ERROR;
     case CHOLMOD_NOT_POSDEF:
       *message = "CHOLMOD warning: Matrix not positive definite.";
-      return LINEAR_SOLVER_FAILURE;
+      return LinearSolverTerminationType::FAILURE;
     case CHOLMOD_DSMALL:
       *message =
           "CHOLMOD warning: D for LDL' or diag(L) or "
           "LL' has tiny absolute value.";
-      return LINEAR_SOLVER_FAILURE;
+      return LinearSolverTerminationType::FAILURE;
     case CHOLMOD_OK:
       if (cholmod_status != 0) {
-        return LINEAR_SOLVER_SUCCESS;
+        return LinearSolverTerminationType::SUCCESS;
       }
 
       *message =
           "CHOLMOD failure: cholmod_factorize returned false "
           "but cholmod_common::status is CHOLMOD_OK."
           "Please report this to ceres-solver@googlegroups.com.";
-      return LINEAR_SOLVER_FATAL_ERROR;
+      return LinearSolverTerminationType::FATAL_ERROR;
     default:
       *message = StringPrintf(
           "Unknown cholmod return code: %d. "
           "Please report this to ceres-solver@googlegroups.com.",
           cc_.status);
-      return LINEAR_SOLVER_FATAL_ERROR;
+      return LinearSolverTerminationType::FATAL_ERROR;
   }
 
-  return LINEAR_SOLVER_FATAL_ERROR;
+  return LinearSolverTerminationType::FATAL_ERROR;
 }
 
 cholmod_dense* SuiteSparse::Solve(cholmod_factor* L,
@@ -381,13 +383,13 @@
     CompressedRowSparseMatrix* lhs, string* message) {
   if (lhs == nullptr) {
     *message = "Failure: Input lhs is nullptr.";
-    return LINEAR_SOLVER_FATAL_ERROR;
+    return LinearSolverTerminationType::FATAL_ERROR;
   }
 
   cholmod_sparse cholmod_lhs = ss_.CreateSparseMatrixTransposeView(lhs);
 
   if (factor_ == nullptr) {
-    if (ordering_type_ == NATURAL) {
+    if (ordering_type_ == OrderingType::NATURAL) {
       factor_ = ss_.AnalyzeCholeskyWithNaturalOrdering(&cholmod_lhs, message);
     } else {
       if (!lhs->col_blocks().empty() && !(lhs->row_blocks().empty())) {
@@ -399,7 +401,7 @@
     }
 
     if (factor_ == nullptr) {
-      return LINEAR_SOLVER_FATAL_ERROR;
+      return LinearSolverTerminationType::FATAL_ERROR;
     }
   }
 
@@ -408,9 +410,9 @@
 
 CompressedRowSparseMatrix::StorageType SuiteSparseCholesky::StorageType()
     const {
-  return ((ordering_type_ == NATURAL)
-              ? CompressedRowSparseMatrix::UPPER_TRIANGULAR
-              : CompressedRowSparseMatrix::LOWER_TRIANGULAR);
+  return ((ordering_type_ == OrderingType::NATURAL)
+              ? CompressedRowSparseMatrix::StorageType::UPPER_TRIANGULAR
+              : CompressedRowSparseMatrix::StorageType::LOWER_TRIANGULAR);
 }
 
 LinearSolverTerminationType SuiteSparseCholesky::Solve(const double* rhs,
@@ -419,7 +421,7 @@
   // Error checking
   if (factor_ == nullptr) {
     *message = "Solve called without a call to Factorize first.";
-    return LINEAR_SOLVER_FATAL_ERROR;
+    return LinearSolverTerminationType::FATAL_ERROR;
   }
 
   const int num_cols = factor_->n;
@@ -428,12 +430,12 @@
       ss_.Solve(factor_, &cholmod_rhs, message);
 
   if (cholmod_dense_solution == nullptr) {
-    return LINEAR_SOLVER_FAILURE;
+    return LinearSolverTerminationType::FAILURE;
   }
 
   memcpy(solution, cholmod_dense_solution->x, num_cols * sizeof(*solution));
   ss_.Free(cholmod_dense_solution);
-  return LINEAR_SOLVER_SUCCESS;
+  return LinearSolverTerminationType::SUCCESS;
 }
 
 }  // namespace ceres::internal
diff --git a/internal/ceres/trust_region_minimizer.cc b/internal/ceres/trust_region_minimizer.cc
index bf51f96..739304a 100644
--- a/internal/ceres/trust_region_minimizer.cc
+++ b/internal/ceres/trust_region_minimizer.cc
@@ -356,13 +356,13 @@
 // Compute the trust region step using the TrustRegionStrategy chosen
 // by the user.
 //
-// If the strategy returns with LINEAR_SOLVER_FATAL_ERROR, which
+// If the strategy returns with LinearSolverTerminationType::FATAL_ERROR, which
 // indicates an unrecoverable error, return false. This is the only
 // condition that returns false.
 //
-// If the strategy returns with LINEAR_SOLVER_FAILURE, which indicates
-// a numerical failure that could be recovered from by retrying
-// (e.g. by increasing the strength of the regularization), we set
+// If the strategy returns with LinearSolverTerminationType::FAILURE, which
+// indicates a numerical failure that could be recovered from by retrying (e.g.
+// by increasing the strength of the regularization), we set
 // iteration_summary_.step_is_valid to false and return true.
 //
 // In all other cases, we compute the decrease in the trust region
@@ -394,7 +394,8 @@
                              residuals_.data(),
                              trust_region_step_.data());
 
-  if (strategy_summary.termination_type == LINEAR_SOLVER_FATAL_ERROR) {
+  if (strategy_summary.termination_type ==
+      LinearSolverTerminationType::FATAL_ERROR) {
     solver_summary_->message =
         "Linear solver failed due to unrecoverable "
         "non-numeric causes. Please see the error log for clues. ";
@@ -406,7 +407,8 @@
       WallTimeInSeconds() - strategy_start_time;
   iteration_summary_.linear_solver_iterations = strategy_summary.num_iterations;
 
-  if (strategy_summary.termination_type == LINEAR_SOLVER_FAILURE) {
+  if (strategy_summary.termination_type ==
+      LinearSolverTerminationType::FAILURE) {
     return true;
   }
 
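The comment block rewritten in this file describes how the minimizer reacts to each termination type. As a plain-language summary, here is a simplified sketch of just that dispatch; it is not the actual ComputeTrustRegionStep(), which additionally times the solve and evaluates the trust region model cost decrease.

// Editorial sketch only: condenses the termination-type handling described
// in the comment above; member bookkeeping and logging are omitted.
bool HandleStepComputation(LinearSolverTerminationType termination_type) {
  if (termination_type == LinearSolverTerminationType::FATAL_ERROR) {
    // Unrecoverable error: the only case in which false is returned.
    return false;
  }
  if (termination_type == LinearSolverTerminationType::FAILURE) {
    // Recoverable numerical failure: the step is treated as invalid so the
    // outer loop can retry, e.g. with stronger regularization.
    return true;
  }
  // All other cases: the step is kept and evaluated further (model cost
  // decrease, finiteness checks, etc.).
  return true;
}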
diff --git a/internal/ceres/trust_region_strategy.h b/internal/ceres/trust_region_strategy.h
index 1b04ceb..334f06f 100644
--- a/internal/ceres/trust_region_strategy.h
+++ b/internal/ceres/trust_region_strategy.h
@@ -111,7 +111,8 @@
     int num_iterations = -1;
 
     // Status of the linear solver used to solve the Newton system.
-    LinearSolverTerminationType termination_type = LINEAR_SOLVER_FAILURE;
+    LinearSolverTerminationType termination_type =
+        LinearSolverTerminationType::FAILURE;
   };
 
   // Use the current radius to solve for the trust region step.
diff --git a/internal/ceres/visibility_based_preconditioner.cc b/internal/ceres/visibility_based_preconditioner.cc
index e725529..f04442d 100644
--- a/internal/ceres/visibility_based_preconditioner.cc
+++ b/internal/ceres/visibility_based_preconditioner.cc
@@ -353,7 +353,7 @@
   // scaling is not needed, which is quite often in our experience.
   LinearSolverTerminationType status = Factorize();
 
-  if (status == LINEAR_SOLVER_FATAL_ERROR) {
+  if (status == LinearSolverTerminationType::FATAL_ERROR) {
     return false;
   }
 
@@ -362,7 +362,8 @@
   // belong to the edges of the degree-2 forest. In the CLUSTER_JACOBI
   // case, the preconditioner is guaranteed to be positive
   // semidefinite.
-  if (status == LINEAR_SOLVER_FAILURE && options_.type == CLUSTER_TRIDIAGONAL) {
+  if (status == LinearSolverTerminationType::FAILURE &&
+      options_.type == CLUSTER_TRIDIAGONAL) {
     VLOG(1) << "Unscaled factorization failed. Retrying with off-diagonal "
             << "scaling";
     ScaleOffDiagonalCells();
@@ -370,7 +371,7 @@
   }
 
   VLOG(2) << "Compute time: " << time(nullptr) - start_time;
-  return (status == LINEAR_SOLVER_SUCCESS);
+  return (status == LinearSolverTerminationType::SUCCESS);
 }
 
 // Consider the preconditioner matrix as meta-block matrix, whose
@@ -413,12 +414,15 @@
   std::unique_ptr<CompressedRowSparseMatrix> lhs;
   const CompressedRowSparseMatrix::StorageType storage_type =
       sparse_cholesky_->StorageType();
-  if (storage_type == CompressedRowSparseMatrix::UPPER_TRIANGULAR) {
+  if (storage_type ==
+      CompressedRowSparseMatrix::StorageType::UPPER_TRIANGULAR) {
     lhs = CompressedRowSparseMatrix::FromTripletSparseMatrix(*tsm);
-    lhs->set_storage_type(CompressedRowSparseMatrix::UPPER_TRIANGULAR);
+    lhs->set_storage_type(
+        CompressedRowSparseMatrix::StorageType::UPPER_TRIANGULAR);
   } else {
     lhs = CompressedRowSparseMatrix::FromTripletSparseMatrixTransposed(*tsm);
-    lhs->set_storage_type(CompressedRowSparseMatrix::LOWER_TRIANGULAR);
+    lhs->set_storage_type(
+        CompressedRowSparseMatrix::StorageType::LOWER_TRIANGULAR);
   }
 
   std::string message;