SuiteSparse errors do not cause a fatal crash.

1. Move LinearSolverTerminationType to ceres::internal.
2. Add FATAL_ERROR as a new enum to LinearSolverTerminationType.
3. Pipe SuiteSparse errors through LinearSolverTerminationType so
   that fatal and non-fatal errors can be distinguished.
4. Update the Levenberg-Marquardt and dogleg strategies to handle
   FATAL_ERROR.
5. Update trust_region_minimizer to terminate when FATAL_ERROR
   is encountered.
6. Remove SuiteSparse::SolveCholesky, since combining factorization
   and solve in one call gets in the way of error handling.
7. Fix all clients calling SuiteSparse to handle the result of
   SuiteSparse::Cholesky correctly.
8. Remove fatal failures in SuiteSparse when symbolic factorization
   fails.
9. Fix all clients of SuiteSparse to deal with null symbolic factors.

This is a temporary fix to deal with some production problems. A more
extensive cleanup and testing regime will be put in place in a
subsequent CL.
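
To make the new contract concrete, the following sketch shows how a
SuiteSparse client is expected to call the API after this change. The
helper ExampleFactorizeAndSolve is hypothetical and for illustration
only; it simply chains the calls touched by this CL (AnalyzeCholesky
returning NULL on failure, Cholesky returning a
LinearSolverTerminationType, and an explicit Solve).

  // Hypothetical helper, for illustration only. Shows the error
  // handling contract SuiteSparse clients follow after this change.
  #include <cstring>

  #include "ceres/linear_solver.h"
  #include "ceres/suitesparse.h"

  namespace ceres {
  namespace internal {

  LinearSolverTerminationType ExampleFactorizeAndSolve(SuiteSparse* ss,
                                                       cholmod_sparse* lhs,
                                                       cholmod_dense* rhs,
                                                       double* solution,
                                                       int num_cols) {
    // Symbolic factorization can now fail and return NULL instead of
    // CHECK-failing; every caller has to handle this.
    cholmod_factor* factor = ss->AnalyzeCholesky(lhs);
    if (factor == NULL) {
      return FATAL_ERROR;
    }

    // Numeric factorization reports a LinearSolverTerminationType
    // instead of a bool. FAILURE (e.g. a matrix that is not positive
    // definite) is recoverable by the trust region strategies;
    // FATAL_ERROR is not.
    const LinearSolverTerminationType status = ss->Cholesky(lhs, factor);
    if (status != TOLERANCE) {
      ss->Free(factor);
      return status;
    }

    // SolveCholesky is gone; the solve is an explicit, separate step.
    cholmod_dense* sol = ss->Solve(factor, rhs);
    ss->Free(factor);
    if (sol == NULL) {
      return FAILURE;
    }

    memcpy(solution, sol->x, num_cols * sizeof(*solution));
    ss->Free(sol);
    return TOLERANCE;
  }

  }  // namespace internal
  }  // namespace ceres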

Change-Id: I1f60d539799dd95db7ecc340911e261fa4824f92
diff --git a/internal/ceres/covariance_impl.cc b/internal/ceres/covariance_impl.cc
index d39508e..c302181 100644
--- a/internal/ceres/covariance_impl.cc
+++ b/internal/ceres/covariance_impl.cc
@@ -443,28 +443,32 @@
 
   cholmod_factor* factor = ss.AnalyzeCholesky(&cholmod_jacobian_view);
   event_logger.AddEvent("Symbolic Factorization");
-  bool factorization_succeeded = ss.Cholesky(&cholmod_jacobian_view, factor);
-  if (factorization_succeeded) {
-    const double reciprocal_condition_number =
-        cholmod_rcond(factor, ss.mutable_cc());
-    if (reciprocal_condition_number <
-        options_.min_reciprocal_condition_number) {
-      LOG(WARNING) << "Cholesky factorization of J'J is not reliable. "
-                   << "Reciprocal condition number: "
-                   << reciprocal_condition_number << " "
-                   << "min_reciprocal_condition_number : "
-                   << options_.min_reciprocal_condition_number;
-      factorization_succeeded = false;
-    }
+  if (factor == NULL) {
+    return false;
   }
 
+  LinearSolverTerminationType termination_type = ss.Cholesky(&cholmod_jacobian_view, factor);
   event_logger.AddEvent("Numeric Factorization");
-  if (!factorization_succeeded) {
-    ss.Free(factor);
+
+  if (termination_type != TOLERANCE) {
     LOG(WARNING) << "Cholesky factorization failed.";
     return false;
   }
 
+  const double reciprocal_condition_number =
+      cholmod_rcond(factor, ss.mutable_cc());
+
+  if (reciprocal_condition_number <
+      options_.min_reciprocal_condition_number) {
+    LOG(WARNING) << "Cholesky factorization of J'J is not reliable. "
+                 << "Reciprocal condition number: "
+                 << reciprocal_condition_number << " "
+                 << "min_reciprocal_condition_number : "
+                 << options_.min_reciprocal_condition_number;
+    ss.Free(factor);
+    return false;
+  }
+
   const int num_rows = covariance_matrix_->num_rows();
   const int* rows = covariance_matrix_->rows();
   const int* cols = covariance_matrix_->cols();
diff --git a/internal/ceres/dogleg_strategy.cc b/internal/ceres/dogleg_strategy.cc
index c85c8e5..d2cab86 100644
--- a/internal/ceres/dogleg_strategy.cc
+++ b/internal/ceres/dogleg_strategy.cc
@@ -135,6 +135,10 @@
   summary.num_iterations = linear_solver_summary.num_iterations;
   summary.termination_type = linear_solver_summary.termination_type;
 
+  if (linear_solver_summary.termination_type == FATAL_ERROR) {
+    return summary;
+  }
+
   if (linear_solver_summary.termination_type != FAILURE) {
     switch (dogleg_type_) {
       // Interpolate the Cauchy point and the Gauss-Newton step.
@@ -579,6 +583,10 @@
       }
     }
 
+    if (linear_solver_summary.termination_type == FATAL_ERROR) {
+      return linear_solver_summary;
+    }
+
     if (linear_solver_summary.termination_type == FAILURE ||
         !IsArrayValid(n, gauss_newton_step_.data())) {
       mu_ *= mu_increase_factor_;
diff --git a/internal/ceres/levenberg_marquardt_strategy.cc b/internal/ceres/levenberg_marquardt_strategy.cc
index fad7c1f..009a951 100644
--- a/internal/ceres/levenberg_marquardt_strategy.cc
+++ b/internal/ceres/levenberg_marquardt_strategy.cc
@@ -105,8 +105,11 @@
   // do not need to be modified.
   LinearSolver::Summary linear_solver_summary =
       linear_solver_->Solve(jacobian, residuals, solve_options, step);
-  if (linear_solver_summary.termination_type == FAILURE ||
-      !IsArrayValid(num_parameters, step)) {
+
+  if (linear_solver_summary.termination_type == FATAL_ERROR) {
+    LOG(WARNING) << "Linear solver fatal error.";
+  } else if (linear_solver_summary.termination_type == FAILURE ||
+             !IsArrayValid(num_parameters, step)) {
     LOG(WARNING) << "Linear solver failure. Failed to compute a finite step.";
     linear_solver_summary.termination_type = FAILURE;
   } else {
diff --git a/internal/ceres/linear_solver.h b/internal/ceres/linear_solver.h
index b0ab80d..4af586a 100644
--- a/internal/ceres/linear_solver.h
+++ b/internal/ceres/linear_solver.h
@@ -50,6 +50,30 @@
 namespace ceres {
 namespace internal {
 
+enum LinearSolverTerminationType {
+  // Termination criterion was met. For factorization based solvers
+  // the tolerance is assumed to be zero. Any user provided values are
+  // ignored.
+  TOLERANCE,
+
+  // Solver ran for max_num_iterations and terminated before the
+  // termination tolerance could be satisfied.
+  MAX_ITERATIONS,
+
+  // Solver is stuck and further iterations will not result in any
+  // measurable progress.
+  STAGNATION,
+
+  // Solver failed. Solver was terminated due to numerical errors. The
+  // exact cause of failure depends on the particular solver being
+  // used.
+  FAILURE,
+
+  // Solver failed with a fatal error that cannot be recovered from.
+  FATAL_ERROR
+};
+
+
 class LinearOperator;
 
 // Abstract base class for objects that implement algorithms for
@@ -251,6 +275,7 @@
     double residual_norm;
     int num_iterations;
     LinearSolverTerminationType termination_type;
+    string status;
   };
 
   virtual ~LinearSolver();
diff --git a/internal/ceres/schur_complement_solver.cc b/internal/ceres/schur_complement_solver.cc
index b192aa1..91dd455 100644
--- a/internal/ceres/schur_complement_solver.cc
+++ b/internal/ceres/schur_complement_solver.cc
@@ -82,10 +82,10 @@
   event_logger.AddEvent("Eliminate");
 
   double* reduced_solution = x + A->num_cols() - lhs_->num_cols();
-  const bool status = SolveReducedLinearSystem(reduced_solution);
+  summary.termination_type = SolveReducedLinearSystem(reduced_solution);
   event_logger.AddEvent("ReducedSolve");
 
-  if (!status) {
+  if (summary.termination_type != TOLERANCE) {
     return summary;
   }
 
@@ -117,7 +117,8 @@
 // Solve the system Sx = r, assuming that the matrix S is stored in a
 // BlockRandomAccessDenseMatrix. The linear system is solved using
 // Eigen's Cholesky factorization.
-bool DenseSchurComplementSolver::SolveReducedLinearSystem(double* solution) {
+LinearSolverTerminationType
+DenseSchurComplementSolver::SolveReducedLinearSystem(double* solution) {
   const BlockRandomAccessDenseMatrix* m =
       down_cast<const BlockRandomAccessDenseMatrix*>(lhs());
   const int num_rows = m->num_rows();
@@ -125,7 +126,7 @@
   // The case where there are no f blocks, and the system is block
   // diagonal.
   if (num_rows == 0) {
-    return true;
+    return TOLERANCE;
   }
 
   if (options().dense_linear_algebra_library_type == EIGEN) {
@@ -136,14 +137,18 @@
         .selfadjointView<Eigen::Upper>()
         .llt()
         .solve(ConstVectorRef(rhs(), num_rows));
-    return true;
+    return TOLERANCE;
   }
 
   VectorRef(solution, num_rows) = ConstVectorRef(rhs(), num_rows);
   const int info = LAPACK::SolveInPlaceUsingCholesky(num_rows,
                                                      m->values(),
                                                      solution);
-  return (info == 0);
+  if (info == 0) {
+    return TOLERANCE;
+  } else {
+    return FAILURE;
+  }
 }
 
 #if !defined(CERES_NO_SUITESPARSE) || !defined(CERES_NO_CXSPARE)
@@ -242,7 +247,8 @@
   set_rhs(new double[lhs()->num_rows()]);
 }
 
-bool SparseSchurComplementSolver::SolveReducedLinearSystem(double* solution) {
+LinearSolverTerminationType
+SparseSchurComplementSolver::SolveReducedLinearSystem(double* solution) {
   switch (options().sparse_linear_algebra_library_type) {
     case SUITE_SPARSE:
       return SolveReducedLinearSystemUsingSuiteSparse(solution);
@@ -255,14 +261,15 @@
 
   LOG(FATAL) << "Unknown sparse linear algebra library : "
              << options().sparse_linear_algebra_library_type;
-  return false;
+  return FATAL_ERROR;
 }
 
 #ifndef CERES_NO_SUITESPARSE
 // Solve the system Sx = r, assuming that the matrix S is stored in a
 // BlockRandomAccessSparseMatrix.  The linear system is solved using
 // CHOLMOD's sparse cholesky factorization routines.
-bool SparseSchurComplementSolver::SolveReducedLinearSystemUsingSuiteSparse(
+LinearSolverTerminationType
+SparseSchurComplementSolver::SolveReducedLinearSystemUsingSuiteSparse(
     double* solution) {
   TripletSparseMatrix* tsm =
       const_cast<TripletSparseMatrix*>(
@@ -273,7 +280,7 @@
   // The case where there are no f blocks, and the system is block
   // diagonal.
   if (num_rows == 0) {
-    return true;
+    return TOLERANCE;
   }
 
   cholmod_sparse* cholmod_lhs = NULL;
@@ -305,29 +312,39 @@
     }
   }
 
+  if (factor_ == NULL) {
+    ss_.Free(cholmod_lhs);
+    return FATAL_ERROR;
+  }
+
   cholmod_dense*  cholmod_rhs =
       ss_.CreateDenseVector(const_cast<double*>(rhs()), num_rows, num_rows);
-  cholmod_dense* cholmod_solution =
-      ss_.SolveCholesky(cholmod_lhs, factor_, cholmod_rhs);
 
+  LinearSolverTerminationType status = ss_.Cholesky(cholmod_lhs, factor_);
+  if (status != TOLERANCE) {
+    return status;
+  }
+
+  cholmod_dense* cholmod_solution = ss_.Solve(factor_, cholmod_rhs);
   ss_.Free(cholmod_lhs);
   ss_.Free(cholmod_rhs);
 
   if (cholmod_solution == NULL) {
     LOG(WARNING) << "CHOLMOD solve failed.";
-    return false;
+    return FAILURE;
   }
 
   VectorRef(solution, num_rows)
       = VectorRef(static_cast<double*>(cholmod_solution->x), num_rows);
   ss_.Free(cholmod_solution);
-  return true;
+  return TOLERANCE;
 }
 #else
-bool SparseSchurComplementSolver::SolveReducedLinearSystemUsingSuiteSparse(
+LinearSolverTerminationType
+SparseSchurComplementSolver::SolveReducedLinearSystemUsingSuiteSparse(
     double* solution) {
   LOG(FATAL) << "No SuiteSparse support in Ceres.";
-  return false;
+  return FATAL_ERROR;
 }
 #endif  // CERES_NO_SUITESPARSE
 
@@ -335,7 +352,8 @@
 // Solve the system Sx = r, assuming that the matrix S is stored in a
 // BlockRandomAccessSparseMatrix.  The linear system is solved using
 // CXSparse's sparse cholesky factorization routines.
-bool SparseSchurComplementSolver::SolveReducedLinearSystemUsingCXSparse(
+LinearSolverTerminationType
+SparseSchurComplementSolver::SolveReducedLinearSystemUsingCXSparse(
     double* solution) {
   // Extract the TripletSparseMatrix that is used for actually storing S.
   TripletSparseMatrix* tsm =
@@ -347,7 +365,7 @@
   // The case where there are no f blocks, and the system is block
   // diagonal.
   if (num_rows == 0) {
-    return true;
+    return TOLERANCE;
   }
 
   cs_di* lhs = CHECK_NOTNULL(cxsparse_.CreateSparseMatrix(tsm));
@@ -363,13 +381,18 @@
   bool ok = cxsparse_.SolveCholesky(lhs, cxsparse_factor_, solution);
 
   cxsparse_.Free(lhs);
-  return ok;
+  if (ok) {
+    return TOLERANCE;
+  } else {
+    return FAILURE;
+  }
 }
 #else
-bool SparseSchurComplementSolver::SolveReducedLinearSystemUsingCXSparse(
+LinearSolverTerminationType
+SparseSchurComplementSolver::SolveReducedLinearSystemUsingCXSparse(
     double* solution) {
   LOG(FATAL) << "No CXSparse support in Ceres.";
-  return false;
+  return FATAL_ERROR;
 }
 #endif  // CERES_NO_CXPARSE
 
diff --git a/internal/ceres/schur_complement_solver.h b/internal/ceres/schur_complement_solver.h
index b5a1c74..3303205 100644
--- a/internal/ceres/schur_complement_solver.h
+++ b/internal/ceres/schur_complement_solver.h
@@ -126,7 +126,8 @@
 
  private:
   virtual void InitStorage(const CompressedRowBlockStructure* bs) = 0;
-  virtual bool SolveReducedLinearSystem(double* solution) = 0;
+  virtual LinearSolverTerminationType SolveReducedLinearSystem(
+      double* solution) = 0;
 
   LinearSolver::Options options_;
 
@@ -146,7 +147,8 @@
 
  private:
   virtual void InitStorage(const CompressedRowBlockStructure* bs);
-  virtual bool SolveReducedLinearSystem(double* solution);
+  virtual LinearSolverTerminationType SolveReducedLinearSystem(
+      double* solution);
 
   CERES_DISALLOW_COPY_AND_ASSIGN(DenseSchurComplementSolver);
 };
@@ -160,9 +162,12 @@
 
  private:
   virtual void InitStorage(const CompressedRowBlockStructure* bs);
-  virtual bool SolveReducedLinearSystem(double* solution);
-  bool SolveReducedLinearSystemUsingSuiteSparse(double* solution);
-  bool SolveReducedLinearSystemUsingCXSparse(double* solution);
+  virtual LinearSolverTerminationType SolveReducedLinearSystem(
+      double* solution);
+  LinearSolverTerminationType SolveReducedLinearSystemUsingSuiteSparse(
+      double* solution);
+  LinearSolverTerminationType SolveReducedLinearSystemUsingCXSparse(
+      double* solution);
 
   // Size of the blocks in the Schur complement.
   vector<int> blocks_;
diff --git a/internal/ceres/sparse_normal_cholesky_solver.cc b/internal/ceres/sparse_normal_cholesky_solver.cc
index f1a5237..697adc1 100644
--- a/internal/ceres/sparse_normal_cholesky_solver.cc
+++ b/internal/ceres/sparse_normal_cholesky_solver.cc
@@ -195,34 +195,50 @@
   VectorRef(x, num_cols).setZero();
 
   cholmod_sparse lhs = ss_.CreateSparseMatrixTransposeView(A);
-  cholmod_dense* rhs = ss_.CreateDenseVector(Atb.data(), num_cols, num_cols);
+
   event_logger.AddEvent("Setup");
 
   if (factor_ == NULL) {
     if (options_.use_postordering) {
-      factor_ =
-          CHECK_NOTNULL(ss_.BlockAnalyzeCholesky(&lhs,
-                                                 A->col_blocks(),
-                                                 A->row_blocks()));
+      factor_ = ss_.BlockAnalyzeCholesky(&lhs,
+                                         A->col_blocks(),
+                                         A->row_blocks());
     } else {
-      factor_ =
-      CHECK_NOTNULL(ss_.AnalyzeCholeskyWithNaturalOrdering(&lhs));
+      factor_ = ss_.AnalyzeCholeskyWithNaturalOrdering(&lhs);
     }
   }
-
   event_logger.AddEvent("Analysis");
 
-  cholmod_dense* sol = ss_.SolveCholesky(&lhs, factor_, rhs);
+  if (factor_ == NULL) {
+    if (per_solve_options.D != NULL) {
+      A->DeleteRows(num_cols);
+    }
+
+    summary.termination_type = FATAL_ERROR;
+    return summary;
+  }
+
+  const LinearSolverTerminationType status = ss_.Cholesky(&lhs, factor_);
+  if (status != TOLERANCE) {
+    if (per_solve_options.D != NULL) {
+      A->DeleteRows(num_cols);
+    }
+
+    summary.termination_type = FATAL_ERROR;
+    return summary;
+  }
+
+  cholmod_dense* rhs = ss_.CreateDenseVector(Atb.data(), num_cols, num_cols);
+  cholmod_dense* sol = ss_.Solve(factor_, rhs);
   event_logger.AddEvent("Solve");
 
   ss_.Free(rhs);
-  rhs = NULL;
-
   if (per_solve_options.D != NULL) {
     A->DeleteRows(num_cols);
   }
 
   summary.num_iterations = 1;
+
   if (sol != NULL) {
     memcpy(x, sol->x, num_cols * sizeof(*x));
 
diff --git a/internal/ceres/suitesparse.cc b/internal/ceres/suitesparse.cc
index 9de32fd..ec93b1a 100644
--- a/internal/ceres/suitesparse.cc
+++ b/internal/ceres/suitesparse.cc
@@ -35,6 +35,7 @@
 #include "cholmod.h"
 #include "ceres/compressed_col_sparse_matrix_utils.h"
 #include "ceres/compressed_row_sparse_matrix.h"
+#include "ceres/linear_solver.h"
 #include "ceres/triplet_sparse_matrix.h"
 
 namespace ceres {
@@ -130,8 +131,11 @@
   cc_.supernodal = CHOLMOD_AUTO;
 
   cholmod_factor* factor = cholmod_analyze(A, &cc_);
-  CHECK_EQ(cc_.status, CHOLMOD_OK)
-      << "Cholmod symbolic analysis failed " << cc_.status;
+  if (cc_.status != CHOLMOD_OK) {
+    LOG(ERROR) << "cholmod_analyze failed. error code: " << cc_.status;
+    return NULL;
+  }
+
   CHECK_NOTNULL(factor);
 
   if (VLOG_IS_ON(2)) {
@@ -162,8 +166,11 @@
 
   cholmod_factor* factor  =
       cholmod_analyze_p(A, const_cast<int*>(&ordering[0]), NULL, 0, &cc_);
-  CHECK_EQ(cc_.status, CHOLMOD_OK)
-      << "Cholmod symbolic analysis failed " << cc_.status;
+  if (cc_.status != CHOLMOD_OK) {
+    LOG(ERROR) << "cholmod_analyze failed. error code: " << cc_.status;
+    return NULL;
+  }
+
   CHECK_NOTNULL(factor);
 
   if (VLOG_IS_ON(2)) {
@@ -180,8 +187,11 @@
   cc_.postorder = 0;
 
   cholmod_factor* factor  = cholmod_analyze(A, &cc_);
-  CHECK_EQ(cc_.status, CHOLMOD_OK)
-      << "Cholmod symbolic analysis failed " << cc_.status;
+  if (cc_.status != CHOLMOD_OK) {
+    LOG(ERROR) << "cholmod_analyze failed. error code: " << cc_.status;
+    return NULL;
+  }
+
   CHECK_NOTNULL(factor);
 
   if (VLOG_IS_ON(2)) {
@@ -233,7 +243,7 @@
   return true;
 }
 
-bool SuiteSparse::Cholesky(cholmod_sparse* A, cholmod_factor* L) {
+LinearSolverTerminationType SuiteSparse::Cholesky(cholmod_sparse* A, cholmod_factor* L) {
   CHECK_NOTNULL(A);
   CHECK_NOTNULL(L);
 
@@ -258,40 +268,37 @@
   switch (cc_.status) {
     case CHOLMOD_NOT_INSTALLED:
       LOG(WARNING) << "CHOLMOD failure: Method not installed.";
-      return false;
+      return FATAL_ERROR;
     case CHOLMOD_OUT_OF_MEMORY:
       LOG(WARNING) << "CHOLMOD failure: Out of memory.";
-      return false;
+      return FATAL_ERROR;
     case CHOLMOD_TOO_LARGE:
       LOG(WARNING) << "CHOLMOD failure: Integer overflow occured.";
-      return false;
+      return FATAL_ERROR;
     case CHOLMOD_INVALID:
       LOG(WARNING) << "CHOLMOD failure: Invalid input.";
-      return false;
+      return FATAL_ERROR;
     case CHOLMOD_NOT_POSDEF:
-      // TODO(sameeragarwal): These two warnings require more
-      // sophisticated handling going forward. For now we will be
-      // strict and treat them as failures.
       LOG(WARNING) << "CHOLMOD warning: Matrix not positive definite.";
-      return false;
+      return FAILURE;
     case CHOLMOD_DSMALL:
       LOG(WARNING) << "CHOLMOD warning: D for LDL' or diag(L) or "
                    << "LL' has tiny absolute value.";
-      return false;
+      return FAILURE;
     case CHOLMOD_OK:
       if (status != 0) {
-        return true;
+        return TOLERANCE;
       }
       LOG(WARNING) << "CHOLMOD failure: cholmod_factorize returned zero "
                    << "but cholmod_common::status is CHOLMOD_OK."
                    << "Please report this to ceres-solver@googlegroups.com.";
-      return false;
+      return FATAL_ERROR;
     default:
-      LOG(WARNING) << "Unknown cholmod return code. "
-                   << "Please report this to ceres-solver@googlegroups.com.";
-      return false;
+      LOG(WARNING) << "Unknown cholmod return code: " << cc_.status
+                   << ". Please report this to ceres-solver@googlegroups.com.";
+      return FATAL_ERROR;
   }
-  return false;
+  return FATAL_ERROR;
 }
 
 cholmod_dense* SuiteSparse::Solve(cholmod_factor* L,
@@ -304,20 +311,6 @@
   return cholmod_solve(CHOLMOD_A, L, b, &cc_);
 }
 
-cholmod_dense* SuiteSparse::SolveCholesky(cholmod_sparse* A,
-                                          cholmod_factor* L,
-                                          cholmod_dense* b) {
-  CHECK_NOTNULL(A);
-  CHECK_NOTNULL(L);
-  CHECK_NOTNULL(b);
-
-  if (Cholesky(A, L)) {
-    return Solve(L, b);
-  }
-
-  return NULL;
-}
-
 void SuiteSparse::ApproximateMinimumDegreeOrdering(cholmod_sparse* matrix,
                                                    int* ordering) {
   cholmod_amd(matrix, NULL, 0, ordering, &cc_);
diff --git a/internal/ceres/suitesparse.h b/internal/ceres/suitesparse.h
index 16f298e..c029b6d 100644
--- a/internal/ceres/suitesparse.h
+++ b/internal/ceres/suitesparse.h
@@ -41,6 +41,7 @@
 #include <vector>
 
 #include "ceres/internal/port.h"
+#include "ceres/linear_solver.h"
 #include "cholmod.h"
 #include "glog/logging.h"
 #include "SuiteSparseQR.hpp"
@@ -167,20 +168,13 @@
-  // factorization for the matrix A or AA^T. Return true if
-  // successful, false otherwise. L contains the numeric factorization
-  // on return.
-  bool Cholesky(cholmod_sparse* A, cholmod_factor* L);
+  // factorization for the matrix A or AA^T. Returns the termination
+  // type of the factorization; anything other than TOLERANCE indicates
+  // failure. L contains the numeric factorization on return.
+  LinearSolverTerminationType Cholesky(cholmod_sparse* A, cholmod_factor* L);
 
   // Given a Cholesky factorization of a matrix A = LL^T, solve the
   // linear system Ax = b, and return the result. If the Solve fails
   // NULL is returned. Caller owns the result.
   cholmod_dense* Solve(cholmod_factor* L, cholmod_dense* b);
 
-  // Combine the calls to Cholesky and Solve into a single call. If
-  // the cholesky factorization or the solve fails, return
-  // NULL. Caller owns the result.
-  cholmod_dense* SolveCholesky(cholmod_sparse* A,
-                               cholmod_factor* L,
-                               cholmod_dense* b);
-
   // By virtue of the modeling layer in Ceres being block oriented,
   // all the matrices used by Ceres are also block oriented. When
   // doing sparse direct factorization of these matrices the
diff --git a/internal/ceres/trust_region_minimizer.cc b/internal/ceres/trust_region_minimizer.cc
index ea7ee74..e09c008 100644
--- a/internal/ceres/trust_region_minimizer.cc
+++ b/internal/ceres/trust_region_minimizer.cc
@@ -238,6 +238,13 @@
     iteration_summary.step_is_successful = false;
 
     double model_cost_change = 0.0;
+    if (strategy_summary.termination_type == FATAL_ERROR) {
+      summary->error = "Terminating. Linear solver encountered a fatal error.";
+      LOG_IF(WARNING, is_not_silent) << summary->error;
+      summary->termination_type = NUMERICAL_FAILURE;
+      return;
+    }
+
     if (strategy_summary.termination_type != FAILURE) {
       // new_model_cost
       //  = 1/2 [f + J * step]^2
diff --git a/internal/ceres/trust_region_strategy.h b/internal/ceres/trust_region_strategy.h
index 0dcdbfe..3f078d5 100644
--- a/internal/ceres/trust_region_strategy.h
+++ b/internal/ceres/trust_region_strategy.h
@@ -33,7 +33,7 @@
 
 #include <string>
 #include "ceres/internal/port.h"
-#include "ceres/types.h"
+#include "ceres/linear_solver.h"
 
 namespace ceres {
 namespace internal {
diff --git a/internal/ceres/types.cc b/internal/ceres/types.cc
index 80f8d39..5f3455f 100644
--- a/internal/ceres/types.cc
+++ b/internal/ceres/types.cc
@@ -312,18 +312,6 @@
   }
 }
 
-const char* LinearSolverTerminationTypeToString(
-    LinearSolverTerminationType type) {
-  switch (type) {
-    CASESTR(TOLERANCE);
-    CASESTR(MAX_ITERATIONS);
-    CASESTR(STAGNATION);
-    CASESTR(FAILURE);
-    default:
-      return "UNKNOWN";
-  }
-}
-
 #undef CASESTR
 #undef STRENUM
 
diff --git a/internal/ceres/visibility_based_preconditioner.cc b/internal/ceres/visibility_based_preconditioner.cc
index 8104356..aeaab12 100644
--- a/internal/ceres/visibility_based_preconditioner.cc
+++ b/internal/ceres/visibility_based_preconditioner.cc
@@ -368,14 +368,18 @@
   //
   // Doing the factorization like this saves us matrix mass when
   // scaling is not needed, which is quite often in our experience.
-  bool status = Factorize();
+  LinearSolverTerminationType status = Factorize();
+
+  if (status == FATAL_ERROR) {
+    return false;
+  }
 
   // The scaling only affects the tri-diagonal case, since
   // ScaleOffDiagonalBlocks only pays attenion to the cells that
   // belong to the edges of the degree-2 forest. In the CLUSTER_JACOBI
   // case, the preconditioner is guaranteed to be positive
   // semidefinite.
-  if (!status && options_.type == CLUSTER_TRIDIAGONAL) {
+  if (status == FAILURE && options_.type == CLUSTER_TRIDIAGONAL) {
     VLOG(1) << "Unscaled factorization failed. Retrying with off-diagonal "
             << "scaling";
     ScaleOffDiagonalCells();
@@ -383,7 +387,7 @@
   }
 
   VLOG(2) << "Compute time: " << time(NULL) - start_time;
-  return status;
+  return (status == TOLERANCE);
 }
 
 // Consider the preconditioner matrix as meta-block matrix, whose
@@ -420,7 +424,7 @@
 
 // Compute the sparse Cholesky factorization of the preconditioner
 // matrix.
-bool VisibilityBasedPreconditioner::Factorize() {
+LinearSolverTerminationType VisibilityBasedPreconditioner::Factorize() {
   // Extract the TripletSparseMatrix that is used for actually storing
   // S and convert it into a cholmod_sparse object.
   cholmod_sparse* lhs = ss_.CreateSparseMatrix(
@@ -436,7 +440,7 @@
     factor_ = ss_.BlockAnalyzeCholesky(lhs, block_size_, block_size_);
   }
 
-  bool status = ss_.Cholesky(lhs, factor_);
+  LinearSolverTerminationType status = ss_.Cholesky(lhs, factor_);
   ss_.Free(lhs);
   return status;
 }
diff --git a/internal/ceres/visibility_based_preconditioner.h b/internal/ceres/visibility_based_preconditioner.h
index c58b1a7..70cea83 100644
--- a/internal/ceres/visibility_based_preconditioner.h
+++ b/internal/ceres/visibility_based_preconditioner.h
@@ -55,6 +55,7 @@
 #include "ceres/graph.h"
 #include "ceres/internal/macros.h"
 #include "ceres/internal/scoped_ptr.h"
+#include "ceres/linear_solver.h"
 #include "ceres/preconditioner.h"
 #include "ceres/suitesparse.h"
 
@@ -147,7 +148,7 @@
   void ComputeClusterTridiagonalSparsity(const CompressedRowBlockStructure& bs);
   void InitStorage(const CompressedRowBlockStructure& bs);
   void InitEliminator(const CompressedRowBlockStructure& bs);
-  bool Factorize();
+  LinearSolverTerminationType Factorize();
   void ScaleOffDiagonalCells();
 
   void ClusterCameras(const vector< set<int> >& visibility);