Enable mixed precision solves.

1. Add Solver::Options::use_mixed_precision_solves and
   Solver::Options::max_num_refinement_iterations (see the usage
   sketch below).
2. Make SparseCholesky::Create return a unique_ptr.
3. SparseCholesky::Create now takes LinearSolver::Options as an
   argument (see the sketch after this list).
4. IterativeRefiner's constructor no longer requires num_cols
   as an argument.
5. SparseNormalCholeskySolver now uses a separate rhs vector.
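
A rough sketch of the new factory usage inside a linear solver, based
on the changes to sparse_cholesky.cc below (the concrete option values
here are illustrative, not part of this change):

  LinearSolver::Options options;
  options.sparse_linear_algebra_library_type = EIGEN_SPARSE;
  options.use_postordering = true;
  options.use_mixed_precision_solves = true;
  options.max_num_refinement_iterations = 2;
  // Create now returns an owning pointer; when
  // max_num_refinement_iterations > 0 the factorization is wrapped in
  // a RefinedSparseCholesky, which applies an IterativeRefiner to the
  // computed solution.
  std::unique_ptr<SparseCholesky> sparse_cholesky =
      SparseCholesky::Create(options);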

This basic implementation results in a 10% reduction in solver time
and a 30% reduction in linear solver memory usage.
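
For reference, enabling the feature from user code would look roughly
like the following (a minimal sketch; it assumes an already populated
ceres::Problem, uses the Eigen sparse backend, which is currently the
only backend supporting this option, and the function name is only for
illustration):

  #include "ceres/ceres.h"

  void SolveWithMixedPrecision(ceres::Problem* problem) {
    ceres::Solver::Options options;
    options.linear_solver_type = ceres::SPARSE_NORMAL_CHOLESKY;
    options.sparse_linear_algebra_library_type = ceres::EIGEN_SPARSE;
    options.use_mixed_precision_solves = true;
    // 2-3 refinement steps recover most of the accuracy lost to the
    // single precision factorization.
    options.max_num_refinement_iterations = 3;
    ceres::Solver::Summary summary;
    ceres::Solve(options, problem, &summary);
  }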

Change-Id: I6830f32cae2febf082d2733262eb2c9f0482b0ea
diff --git a/examples/bundle_adjuster.cc b/examples/bundle_adjuster.cc
index 319e385..5619c52 100644
--- a/examples/bundle_adjuster.cc
+++ b/examples/bundle_adjuster.cc
@@ -120,6 +120,8 @@
              "the pertubations.");
 DEFINE_bool(line_search, false, "Use a line search instead of trust region "
             "algorithm.");
+DEFINE_bool(mixed_precision_solves, false, "Use mixed precision solves.");
+DEFINE_int32(max_num_refinement_iterations, 0, "Iterative refinement iterations");
 DEFINE_string(initial_ply, "", "Export the BAL file data as a PLY file.");
 DEFINE_string(final_ply, "", "Export the refined BAL file data as a PLY "
               "file.");
@@ -141,6 +143,8 @@
             FLAGS_dense_linear_algebra_library,
             &options->dense_linear_algebra_library_type));
   options->use_explicit_schur_complement = FLAGS_explicit_schur_complement;
+  options->use_mixed_precision_solves = FLAGS_mixed_precision_solves;
+  options->max_num_refinement_iterations = FLAGS_max_num_refinement_iterations;
 }
 
 void SetOrdering(BALProblem* bal_problem, Solver::Options* options) {
diff --git a/include/ceres/solver.h b/include/ceres/solver.h
index 592d329..9fc9ec5 100644
--- a/include/ceres/solver.h
+++ b/include/ceres/solver.h
@@ -497,9 +497,33 @@
     // then it is probably best to keep this false, otherwise it will
     // likely lead to worse performance.
 
-    // This settings affects the SPARSE_NORMAL_CHOLESKY solver.
+    // This setting only affects the SPARSE_NORMAL_CHOLESKY solver.
     bool dynamic_sparsity = false;
 
+    // TODO(sameeragarwal): Further expand the documentation for the
+    // following two options.
+
+    // NOTE1: EXPERIMENTAL FEATURE, UNDER DEVELOPMENT, USE AT YOUR OWN RISK.
+    //
+    // If use_mixed_precision_solves is true, the Gauss-Newton matrix
+    // is computed in double precision, but its factorization is
+    // computed in single precision. This can result in significant
+    // time and memory savings at the cost of some accuracy in the
+    // Gauss-Newton step. Iterative refinement is used to recover some
+    // of this accuracy.
+    //
+    // If use_mixed_precision_solves is true, we recommend setting
+    // max_num_refinement_iterations to 2-3.
+    //
+    // NOTE2: The following two options are currently only applicable
+    // if sparse_linear_algebra_library_type is EIGEN_SPARSE and
+    // linear_solver_type is SPARSE_NORMAL_CHOLESKY or SPARSE_SCHUR.
+    bool use_mixed_precision_solves = false;
+
+    // Number of steps of the iterative refinement process to run when
+    // computing the Gauss-Newton step.
+    int max_num_refinement_iterations = 0;
+
     // Some non-linear least squares problems have additional
     // structure in the way the parameter blocks interact that it is
     // beneficial to modify the way the trust region step is computed.
diff --git a/internal/ceres/compressed_row_sparse_matrix.cc b/internal/ceres/compressed_row_sparse_matrix.cc
index 41dbdff..8b57956 100644
--- a/internal/ceres/compressed_row_sparse_matrix.cc
+++ b/internal/ceres/compressed_row_sparse_matrix.cc
@@ -266,6 +266,8 @@
   std::fill(values_.begin(), values_.end(), 0);
 }
 
+// TODO(sameeragarwal): Make RightMultiply and LeftMultiply
+// block-aware for higher performance.
 void CompressedRowSparseMatrix::RightMultiply(const double* x,
                                               double* y) const {
   CHECK_NOTNULL(x);
diff --git a/internal/ceres/cxsparse.cc b/internal/ceres/cxsparse.cc
index 11ff943..5a02877 100644
--- a/internal/ceres/cxsparse.cc
+++ b/internal/ceres/cxsparse.cc
@@ -196,8 +196,9 @@
 
 void CXSparse::Free(csn* numeric_factor) { cs_di_nfree(numeric_factor); }
 
-CXSparseCholesky* CXSparseCholesky::Create(const OrderingType ordering_type) {
-  return new CXSparseCholesky(ordering_type);
+std::unique_ptr<SparseCholesky> CXSparseCholesky::Create(
+    const OrderingType ordering_type) {
+  return std::unique_ptr<SparseCholesky>(new CXSparseCholesky(ordering_type));
 }
 
 CompressedRowSparseMatrix::StorageType CXSparseCholesky::StorageType() const {
diff --git a/internal/ceres/cxsparse.h b/internal/ceres/cxsparse.h
index 1ad79f9..1789afd 100644
--- a/internal/ceres/cxsparse.h
+++ b/internal/ceres/cxsparse.h
@@ -36,6 +36,7 @@
 
 #ifndef CERES_NO_CXSPARSE
 
+#include <memory>
 #include <string>
 #include <vector>
 
@@ -140,7 +141,7 @@
 class CXSparseCholesky : public SparseCholesky {
  public:
   // Factory
-  static CXSparseCholesky* Create(const OrderingType ordering_type);
+  static std::unique_ptr<SparseCholesky> Create(OrderingType ordering_type);
 
   // SparseCholesky interface.
   virtual ~CXSparseCholesky();
diff --git a/internal/ceres/eigensparse.cc b/internal/ceres/eigensparse.cc
index 8ca1d17..425cd8b 100644
--- a/internal/ceres/eigensparse.cc
+++ b/internal/ceres/eigensparse.cc
@@ -41,6 +41,8 @@
 namespace ceres {
 namespace internal {
 
+// TODO(sameeragarwal): Use enable_if to clean up the implementations
+// for when Scalar == double.
 template <typename Solver>
 class EigenSparseCholeskyTemplate : public SparseCholesky {
  public:
@@ -78,23 +80,22 @@
     return LINEAR_SOLVER_SUCCESS;
   }
 
-  virtual LinearSolverTerminationType Solve(const double* rhs_ptr,
-                                            double* solution_ptr,
-                                            std::string* message) {
+  LinearSolverTerminationType Solve(const double* rhs_ptr,
+                                    double* solution_ptr,
+                                    std::string* message) {
     CHECK(analyzed_) << "Solve called without a call to Factorize first.";
 
-    ConstVectorRef rhs(rhs_ptr, solver_.cols());
-    VectorRef solution(solution_ptr, solver_.cols());
+    scalar_rhs_ = ConstVectorRef(rhs_ptr, solver_.cols())
+                      .template cast<typename Solver::Scalar>();
 
     // The two casts are needed if the Scalar in this class is not
     // double. For code simplicitly we are going to assume that Eigen
     // is smart enough to figure out that casting a double Vector to a
     // double Vector is a straight copy. If this turns into a
     // performance bottleneck (unlikely), we can revisit this.
-    solution =
-        solver_
-        .solve(rhs.template cast<typename Solver::Scalar>())
-        .template cast<double>();
+    scalar_solution_ = solver_.solve(scalar_rhs_);
+    VectorRef(solution_ptr, solver_.cols()) =
+        scalar_solution_.template cast<double>();
 
     if (solver_.info() != Eigen::Success) {
       *message = "Eigen failure. Unable to do triangular solve.";
@@ -131,12 +132,16 @@
   }
 
  private:
-  Eigen::Matrix<typename Solver::Scalar, Eigen::Dynamic, 1> values_;
+  Eigen::Matrix<typename Solver::Scalar, Eigen::Dynamic, 1> values_,
+      scalar_rhs_, scalar_solution_;
   bool analyzed_;
   Solver solver_;
 };
 
-SparseCholesky* EigenSparseCholesky::Create(const OrderingType ordering_type) {
+std::unique_ptr<SparseCholesky> EigenSparseCholesky::Create(
+    const OrderingType ordering_type) {
+  std::unique_ptr<SparseCholesky> sparse_cholesky;
+
   // The preprocessor gymnastics here are dealing with the fact that
   // before version 3.2.2, Eigen did not support a third template
   // parameter to specify the ordering and it always defaults to AMD.
@@ -150,44 +155,48 @@
                                 Eigen::NaturalOrdering<int>>
       WithNaturalOrdering;
   if (ordering_type == AMD) {
-    return new EigenSparseCholeskyTemplate<WithAMDOrdering>();
+    sparse_cholesky.reset(new EigenSparseCholeskyTemplate<WithAMDOrdering>());
   } else {
-    return new EigenSparseCholeskyTemplate<WithNaturalOrdering>();
+    sparse_cholesky.reset(
+        new EigenSparseCholeskyTemplate<WithNaturalOrdering>());
   }
 #else
   typedef Eigen::SimplicialLDLT<Eigen::SparseMatrix<double>, Eigen::Upper>
       WithAMDOrdering;
-  return new EigenSparseCholeskyTemplate<WithAMDOrdering>();
+  sparse_cholesky.reset(new EigenSparseCholeskyTemplate<WithAMDOrdering>());
 #endif
+  return sparse_cholesky;
 }
 
 EigenSparseCholesky::~EigenSparseCholesky() {}
 
-SparseCholesky* FloatEigenSparseCholesky::Create(
+std::unique_ptr<SparseCholesky> FloatEigenSparseCholesky::Create(
     const OrderingType ordering_type) {
+  std::unique_ptr<SparseCholesky> sparse_cholesky;
   // The preprocessor gymnastics here are dealing with the fact that
   // before version 3.2.2, Eigen did not support a third template
   // parameter to specify the ordering and it always defaults to AMD.
 #if EIGEN_VERSION_AT_LEAST(3, 2, 2)
   typedef Eigen::SimplicialLDLT<Eigen::SparseMatrix<float>,
                                 Eigen::Upper,
-                                Eigen::AMDOrdering<int> >
+                                Eigen::AMDOrdering<int>>
       WithAMDOrdering;
   typedef Eigen::SimplicialLDLT<Eigen::SparseMatrix<float>,
                                 Eigen::Upper,
-                                Eigen::NaturalOrdering<int> >
+                                Eigen::NaturalOrdering<int>>
       WithNaturalOrdering;
   if (ordering_type == AMD) {
-    LOG(FATAL) << "We should not be doing this";
-    return new EigenSparseCholeskyTemplate<WithAMDOrdering>();
+    sparse_cholesky.reset(new EigenSparseCholeskyTemplate<WithAMDOrdering>());
   } else {
-    return new EigenSparseCholeskyTemplate<WithNaturalOrdering>();
+    sparse_cholesky.reset(
+        new EigenSparseCholeskyTemplate<WithNaturalOrdering>());
   }
 #else
   typedef Eigen::SimplicialLDLT<Eigen::SparseMatrix<float>, Eigen::Upper>
       WithAMDOrdering;
-  return new EigenSparseCholeskyTemplate<WithAMDOrdering>();
+  sparse_cholesky.reset(new EigenSparseCholeskyTemplate<WithAMDOrdering>());
 #endif
+  return sparse_cholesky;
 }
 
 FloatEigenSparseCholesky::~FloatEigenSparseCholesky() {}
diff --git a/internal/ceres/eigensparse.h b/internal/ceres/eigensparse.h
index ce8ef34..2e6c6f0 100644
--- a/internal/ceres/eigensparse.h
+++ b/internal/ceres/eigensparse.h
@@ -38,7 +38,9 @@
 
 #ifdef CERES_USE_EIGEN_SPARSE
 
+#include <memory>
 #include <string>
+
 #include "Eigen/SparseCore"
 #include "ceres/linear_solver.h"
 #include "ceres/sparse_cholesky.h"
@@ -49,7 +51,8 @@
 class EigenSparseCholesky : public SparseCholesky {
  public:
   // Factory
-  static SparseCholesky* Create(const OrderingType ordering_type);
+  static std::unique_ptr<SparseCholesky> Create(
+      const OrderingType ordering_type);
 
   // SparseCholesky interface.
   virtual ~EigenSparseCholesky();
@@ -66,7 +69,8 @@
 class FloatEigenSparseCholesky : public SparseCholesky {
  public:
   // Factory
-  static SparseCholesky* Create(const OrderingType ordering_type);
+  static std::unique_ptr<SparseCholesky> Create(
+      const OrderingType ordering_type);
 
   // SparseCholesky interface.
   virtual ~FloatEigenSparseCholesky();
diff --git a/internal/ceres/iterative_refiner.cc b/internal/ceres/iterative_refiner.cc
index fff343d..b73c0b0 100644
--- a/internal/ceres/iterative_refiner.cc
+++ b/internal/ceres/iterative_refiner.cc
@@ -38,25 +38,28 @@
 namespace ceres {
 namespace internal {
 
-IterativeRefiner::IterativeRefiner(const int num_cols,
-                                   const int max_num_iterations)
-    : num_cols_(num_cols),
-      max_num_iterations_(max_num_iterations),
-      residual_(num_cols),
-      correction_(num_cols),
-      lhs_x_solution_(num_cols) {}
+IterativeRefiner::IterativeRefiner(const int max_num_iterations)
+    : max_num_iterations_(max_num_iterations) {}
 
 IterativeRefiner::~IterativeRefiner() {}
 
+void IterativeRefiner::Allocate(int num_cols) {
+  residual_.resize(num_cols);
+  correction_.resize(num_cols);
+  lhs_x_solution_.resize(num_cols);
+}
+
 IterativeRefiner::Summary IterativeRefiner::Refine(
     const SparseMatrix& lhs,
     const double* rhs_ptr,
     SparseCholesky* sparse_cholesky,
     double* solution_ptr) {
   Summary summary;
+  const int num_cols = lhs.num_cols();
+  Allocate(num_cols);
 
-  ConstVectorRef rhs(rhs_ptr, num_cols_);
-  VectorRef solution(solution_ptr, num_cols_);
+  ConstVectorRef rhs(rhs_ptr, num_cols);
+  VectorRef solution(solution_ptr, num_cols);
 
   summary.lhs_max_norm = ConstVectorRef(lhs.values(), lhs.num_nonzeros())
                              .lpNorm<Eigen::Infinity>();
diff --git a/internal/ceres/iterative_refiner.h b/internal/ceres/iterative_refiner.h
index d1efb5c..9021e81 100644
--- a/internal/ceres/iterative_refiner.h
+++ b/internal/ceres/iterative_refiner.h
@@ -77,12 +77,9 @@
     double residual_max_norm = -1;
   };
 
-  // num_cols is the number of rows & columns in the linear system
-  // being solved.
-  //
   // max_num_iterations is the maximum number of refinement iterations
   // to perform.
-  IterativeRefiner(int num_cols, int max_num_iterations);
+  IterativeRefiner(int max_num_iterations);
 
   // Needed for mocking.
   virtual ~IterativeRefiner();
@@ -97,13 +94,18 @@
   // to lhs * x = rhs. It can be zero.
   //
   // This method is virtual to facilitate mocking.
+  //
+  // TODO(sameeragarwal): Consider dropping the Summary object, and
+  // simplifying the internal implementation to improve efficiency,
+  // since we do not seem to be using the output at all.
   virtual Summary Refine(const SparseMatrix& lhs,
                          const double* rhs,
                          SparseCholesky* sparse_cholesky,
                          double* solution);
 
  private:
-  int num_cols_;
+  void Allocate(int num_cols);
+
   int max_num_iterations_;
   Vector residual_;
   Vector correction_;
diff --git a/internal/ceres/iterative_refiner_test.cc b/internal/ceres/iterative_refiner_test.cc
index a3bcbf1..0c9ef69 100644
--- a/internal/ceres/iterative_refiner_test.cc
+++ b/internal/ceres/iterative_refiner_test.cc
@@ -147,7 +147,7 @@
        ExactSolutionWithExactFactorizationReturnsInZeroIterations) {
   FakeSparseMatrix lhs(lhs_);
   FakeSparseCholesky<double> sparse_cholesky(lhs_);
-  IterativeRefiner refiner(num_cols_, max_num_iterations_);
+  IterativeRefiner refiner(max_num_iterations_);
   Vector refined_solution = solution_;
   auto summary = refiner.Refine(
       lhs, rhs_.data(), &sparse_cholesky, refined_solution.data());
@@ -161,7 +161,7 @@
        RandomSolutionWithExactFactorizationReturnsInOneIteration) {
   FakeSparseMatrix lhs(lhs_);
   FakeSparseCholesky<double> sparse_cholesky(lhs_);
-  IterativeRefiner refiner(num_cols_, max_num_iterations_);
+  IterativeRefiner refiner(max_num_iterations_);
   Vector refined_solution(num_cols_);
   refined_solution.setRandom();
   auto summary = refiner.Refine(
@@ -178,7 +178,7 @@
   // Use a single precision Cholesky factorization of the double
   // precision matrix. This will give us an approximate factorization.
   FakeSparseCholesky<float> sparse_cholesky(lhs_);
-  IterativeRefiner refiner(num_cols_, max_num_iterations_);
+  IterativeRefiner refiner(max_num_iterations_);
   Vector refined_solution(num_cols_);
   refined_solution.setRandom();
   auto summary = refiner.Refine(
diff --git a/internal/ceres/linear_solver.h b/internal/ceres/linear_solver.h
index a7c61bc..51897fc 100644
--- a/internal/ceres/linear_solver.h
+++ b/internal/ceres/linear_solver.h
@@ -160,6 +160,8 @@
     int e_block_size = Eigen::Dynamic;
     int f_block_size = Eigen::Dynamic;
 
+    bool use_mixed_precision_solves = false;
+    int max_num_refinement_iterations = 0;
     ContextImpl* context = nullptr;
   };
 
diff --git a/internal/ceres/schur_complement_solver.cc b/internal/ceres/schur_complement_solver.cc
index 7934e36..e051b98 100644
--- a/internal/ceres/schur_complement_solver.cc
+++ b/internal/ceres/schur_complement_solver.cc
@@ -227,9 +227,7 @@
     const LinearSolver::Options& options)
     : SchurComplementSolver(options) {
   if (options.type != ITERATIVE_SCHUR) {
-    sparse_cholesky_.reset(
-        SparseCholesky::Create(options.sparse_linear_algebra_library_type,
-                               options.use_postordering ? AMD : NATURAL));
+    sparse_cholesky_ = SparseCholesky::Create(options);
   }
 }
 
diff --git a/internal/ceres/sparse_cholesky.cc b/internal/ceres/sparse_cholesky.cc
index 3c4e97f..ef6d762 100644
--- a/internal/ceres/sparse_cholesky.cc
+++ b/internal/ceres/sparse_cholesky.cc
@@ -38,41 +38,53 @@
 namespace ceres {
 namespace internal {
 
-SparseCholesky* SparseCholesky::Create(
-    SparseLinearAlgebraLibraryType sparse_linear_algebra_library_type,
-    OrderingType ordering_type) {
-  switch (sparse_linear_algebra_library_type) {
+std::unique_ptr<SparseCholesky> SparseCholesky::Create(
+    const LinearSolver::Options& options) {
+  const OrderingType ordering_type = options.use_postordering ? AMD : NATURAL;
+  std::unique_ptr<SparseCholesky> sparse_cholesky;
+
+  switch (options.sparse_linear_algebra_library_type) {
     case SUITE_SPARSE:
 #ifndef CERES_NO_SUITESPARSE
-      return SuiteSparseCholesky::Create(ordering_type);
+      sparse_cholesky = SuiteSparseCholesky::Create(ordering_type);
+      break;
 #else
       LOG(FATAL) << "Ceres was compiled without support for SuiteSparse.";
-      return NULL;
 #endif
 
     case EIGEN_SPARSE:
 #ifdef CERES_USE_EIGEN_SPARSE
-      return EigenSparseCholesky::Create(ordering_type);
+      if (options.use_mixed_precision_solves) {
+        sparse_cholesky = FloatEigenSparseCholesky::Create(ordering_type);
+      } else {
+        sparse_cholesky = EigenSparseCholesky::Create(ordering_type);
+      }
+      break;
 #else
       LOG(FATAL) << "Ceres was compiled without support for "
                  << "Eigen's sparse Cholesky factorization routines.";
-      return NULL;
 #endif
 
     case CX_SPARSE:
 #ifndef CERES_NO_CXSPARSE
-      return CXSparseCholesky::Create(ordering_type);
+      sparse_cholesky = CXSparseCholesky::Create(ordering_type);
 #else
       LOG(FATAL) << "Ceres was compiled without support for CXSparse.";
-      return NULL;
 #endif
-
+      break;
     default:
       LOG(FATAL) << "Unknown sparse linear algebra library type : "
                  << SparseLinearAlgebraLibraryTypeToString(
-                        sparse_linear_algebra_library_type);
+                        options.sparse_linear_algebra_library_type);
   }
-  return NULL;
+
+  if (options.max_num_refinement_iterations > 0) {
+    std::unique_ptr<IterativeRefiner> refiner(
+        new IterativeRefiner(options.max_num_refinement_iterations));
+    sparse_cholesky = std::unique_ptr<SparseCholesky>(new RefinedSparseCholesky(
+        std::move(sparse_cholesky), std::move(refiner)));
+  }
+  return sparse_cholesky;
 }
 
 SparseCholesky::~SparseCholesky() {}
@@ -105,7 +117,8 @@
 
 RefinedSparseCholesky::~RefinedSparseCholesky() {}
 
-CompressedRowSparseMatrix::StorageType RefinedSparseCholesky::StorageType() const {
+CompressedRowSparseMatrix::StorageType RefinedSparseCholesky::StorageType()
+    const {
   return sparse_cholesky_->StorageType();
 }
 
diff --git a/internal/ceres/sparse_cholesky.h b/internal/ceres/sparse_cholesky.h
index c0e3e86..bbe4237 100644
--- a/internal/ceres/sparse_cholesky.h
+++ b/internal/ceres/sparse_cholesky.h
@@ -66,14 +66,8 @@
 
 class SparseCholesky {
  public:
-  // Factory which returns an instance of SparseCholesky for the given
-  // sparse linear algebra library and fill reducing ordering
-  // strategy.
-  //
-  // Caller owns the result.
-  static SparseCholesky* Create(
-      SparseLinearAlgebraLibraryType sparse_linear_algebra_library_type,
-      OrderingType ordering_type);
+  static std::unique_ptr<SparseCholesky> Create(
+      const LinearSolver::Options& options);
 
   virtual ~SparseCholesky();
 
diff --git a/internal/ceres/sparse_cholesky_test.cc b/internal/ceres/sparse_cholesky_test.cc
index f0cd729..1c79bc9 100644
--- a/internal/ceres/sparse_cholesky_test.cc
+++ b/internal/ceres/sparse_cholesky_test.cc
@@ -109,8 +109,12 @@
     const int min_block_size,
     const int max_block_size,
     const double block_density) {
-  std::unique_ptr<SparseCholesky> sparse_cholesky(SparseCholesky::Create(
-      sparse_linear_algebra_library_type, ordering_type));
+  LinearSolver::Options sparse_cholesky_options;
+  sparse_cholesky_options.sparse_linear_algebra_library_type =
+      sparse_linear_algebra_library_type;
+  sparse_cholesky_options.use_postordering = (ordering_type == AMD);
+  std::unique_ptr<SparseCholesky> sparse_cholesky = SparseCholesky::Create(
+      sparse_cholesky_options);
   const CompressedRowSparseMatrix::StorageType storage_type =
       sparse_cholesky->StorageType();
 
@@ -230,7 +234,7 @@
 
 class MockIterativeRefiner : public IterativeRefiner {
  public:
-  MockIterativeRefiner() : IterativeRefiner(1, 1) {}
+  MockIterativeRefiner() : IterativeRefiner(1) {}
   MOCK_METHOD4(Refine,
                Summary(const SparseMatrix& lhs,
                        const double* rhs,
diff --git a/internal/ceres/sparse_normal_cholesky_solver.cc b/internal/ceres/sparse_normal_cholesky_solver.cc
index 0572870..0f2e589 100644
--- a/internal/ceres/sparse_normal_cholesky_solver.cc
+++ b/internal/ceres/sparse_normal_cholesky_solver.cc
@@ -38,6 +38,7 @@
 #include "ceres/block_sparse_matrix.h"
 #include "ceres/inner_product_computer.h"
 #include "ceres/internal/eigen.h"
+#include "ceres/iterative_refiner.h"
 #include "ceres/linear_solver.h"
 #include "ceres/sparse_cholesky.h"
 #include "ceres/triplet_sparse_matrix.h"
@@ -50,9 +51,7 @@
 SparseNormalCholeskySolver::SparseNormalCholeskySolver(
     const LinearSolver::Options& options)
     : options_(options) {
-  sparse_cholesky_.reset(
-      SparseCholesky::Create(options_.sparse_linear_algebra_library_type,
-                             options_.use_postordering ? AMD : NATURAL));
+  sparse_cholesky_ = SparseCholesky::Create(options);
 }
 
 SparseNormalCholeskySolver::~SparseNormalCholeskySolver() {}
@@ -69,8 +68,11 @@
   summary.message = "Success.";
 
   const int num_cols = A->num_cols();
-  VectorRef(x, num_cols).setZero();
-  A->LeftMultiply(b, x);
+  VectorRef xref(x, num_cols);
+  xref.setZero();
+  rhs_.resize(num_cols);
+  rhs_.setZero();
+  A->LeftMultiply(b, rhs_.data());
   event_logger.AddEvent("Compute RHS");
 
   if (per_solve_options.D != NULL) {
@@ -95,14 +97,16 @@
   inner_product_computer_->Compute();
   event_logger.AddEvent("InnerProductComputer::Compute");
 
-  // TODO(sameeragarwal):
-
   if (per_solve_options.D != NULL) {
     A->DeleteRowBlocks(A->block_structure()->cols.size());
   }
+
   summary.termination_type = sparse_cholesky_->FactorAndSolve(
-      inner_product_computer_->mutable_result(), x, x, &summary.message);
-  event_logger.AddEvent("Factor & Solve");
+      inner_product_computer_->mutable_result(),
+      rhs_.data(),
+      x,
+      &summary.message);
+  event_logger.AddEvent("SparseCholesky::FactorAndSolve");
   return summary;
 }
 
diff --git a/internal/ceres/sparse_normal_cholesky_solver.h b/internal/ceres/sparse_normal_cholesky_solver.h
index 537a2c9..59c7c0b 100644
--- a/internal/ceres/sparse_normal_cholesky_solver.h
+++ b/internal/ceres/sparse_normal_cholesky_solver.h
@@ -63,6 +63,7 @@
       double* x);
 
   const LinearSolver::Options options_;
+  Vector rhs_;
   std::unique_ptr<SparseCholesky> sparse_cholesky_;
   std::unique_ptr<InnerProductComputer> inner_product_computer_;
   CERES_DISALLOW_COPY_AND_ASSIGN(SparseNormalCholeskySolver);
diff --git a/internal/ceres/subset_preconditioner.cc b/internal/ceres/subset_preconditioner.cc
index c7f5ef6..de6e9fa 100644
--- a/internal/ceres/subset_preconditioner.cc
+++ b/internal/ceres/subset_preconditioner.cc
@@ -44,10 +44,13 @@
 SubsetPreconditioner::SubsetPreconditioner(
     const Preconditioner::Options& options, const BlockSparseMatrix& A)
     : options_(options), num_cols_(A.num_cols()) {
-  sparse_cholesky_.reset(
-      SparseCholesky::Create(options_.sparse_linear_algebra_library_type,
-                             options_.use_postordering ? AMD : NATURAL));
   CHECK_GE(options_.subset_preconditioner_start_row_block, 0);
+  LinearSolver::Options sparse_cholesky_options;
+  sparse_cholesky_options.sparse_linear_algebra_library_type =
+      options_.sparse_linear_algebra_library_type;
+  sparse_cholesky_options.use_postordering =
+      options_.use_postordering;
+  sparse_cholesky_ = SparseCholesky::Create(sparse_cholesky_options);
 }
 
 SubsetPreconditioner::~SubsetPreconditioner() {}
diff --git a/internal/ceres/suitesparse.cc b/internal/ceres/suitesparse.cc
index cc07a01..f10f57e 100644
--- a/internal/ceres/suitesparse.cc
+++ b/internal/ceres/suitesparse.cc
@@ -348,9 +348,9 @@
 #endif
 }
 
-SuiteSparseCholesky* SuiteSparseCholesky::Create(
+std::unique_ptr<SparseCholesky> SuiteSparseCholesky::Create(
     const OrderingType ordering_type) {
-  return new SuiteSparseCholesky(ordering_type);
+  return std::unique_ptr<SparseCholesky>(new SuiteSparseCholesky(ordering_type));
 }
 
 SuiteSparseCholesky::SuiteSparseCholesky(const OrderingType ordering_type)
diff --git a/internal/ceres/suitesparse.h b/internal/ceres/suitesparse.h
index 91cd24b..6fef0fd 100644
--- a/internal/ceres/suitesparse.h
+++ b/internal/ceres/suitesparse.h
@@ -285,7 +285,8 @@
 
 class SuiteSparseCholesky : public SparseCholesky {
  public:
-  static SuiteSparseCholesky* Create(const OrderingType ordering_type);
+  static std::unique_ptr<SparseCholesky> Create(
+      OrderingType ordering_type);
 
   // SparseCholesky interface.
   virtual ~SuiteSparseCholesky();
diff --git a/internal/ceres/trust_region_preprocessor.cc b/internal/ceres/trust_region_preprocessor.cc
index 96f597c..cca2cf7 100644
--- a/internal/ceres/trust_region_preprocessor.cc
+++ b/internal/ceres/trust_region_preprocessor.cc
@@ -193,6 +193,10 @@
   pp->linear_solver_options.use_explicit_schur_complement =
       options.use_explicit_schur_complement;
   pp->linear_solver_options.dynamic_sparsity = options.dynamic_sparsity;
+  pp->linear_solver_options.use_mixed_precision_solves =
+      options.use_mixed_precision_solves;
+  pp->linear_solver_options.max_num_refinement_iterations =
+      options.max_num_refinement_iterations;
   pp->linear_solver_options.num_threads = options.num_threads;
   pp->linear_solver_options.use_postordering = options.use_postordering;
   pp->linear_solver_options.context = pp->problem->context();
diff --git a/internal/ceres/visibility_based_preconditioner.cc b/internal/ceres/visibility_based_preconditioner.cc
index 36247b7..7cd9bd4 100644
--- a/internal/ceres/visibility_based_preconditioner.cc
+++ b/internal/ceres/visibility_based_preconditioner.cc
@@ -105,8 +105,18 @@
   InitEliminator(bs);
   const time_t eliminator_time = time(NULL);
 
-  sparse_cholesky_.reset(
-      SparseCholesky::Create(options_.sparse_linear_algebra_library_type, AMD));
+  LinearSolver::Options sparse_cholesky_options;
+  sparse_cholesky_options.sparse_linear_algebra_library_type =
+      options_.sparse_linear_algebra_library_type;
+
+  // The preconditioner's sparsity is not available in the
+  // preprocessor, so the columns of the Jacobian have not been
+  // reordered to minimize fill-in when computing its sparse Cholesky
+  // factorization. We must therefore tell the SparseCholesky object
+  // to perform approximate minimum-degree reordering, which is done
+  // by setting use_postordering to true.
+  sparse_cholesky_options.use_postordering = true;
+  sparse_cholesky_ = SparseCholesky::Create(sparse_cholesky_options);
 
   const time_t init_time = time(NULL);
   VLOG(2) << "init time: " << init_time - start_time