Enable Eigen as a sparse linear algebra library.

SPARSE_NORMAL_CHOLESKY and SPARSE_SCHUR can now be used
with EIGEN_SPARSE as the backend.
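
For example, selecting the new backend looks roughly like this
(a minimal sketch; SolveWithEigenSparse is a hypothetical helper and
construction of the ceres::Problem is omitted):

  #include "ceres/ceres.h"

  // Solve an already-constructed problem using Eigen's sparse
  // Cholesky factorization as the backend.
  void SolveWithEigenSparse(ceres::Problem* problem) {
    ceres::Solver::Options options;
    options.linear_solver_type = ceres::SPARSE_NORMAL_CHOLESKY;  // or SPARSE_SCHUR
    options.sparse_linear_algebra_library_type = ceres::EIGEN_SPARSE;

    ceres::Solver::Summary summary;
    ceres::Solve(options, problem, &summary);
  }

This requires building Ceres with -DEIGENSPARSE=ON.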

The performance is not as good as CXSparse. This needs to be
investigated. Is it because the quality of the AMD ordering we
compute is not as good as the one computed by CXSparse? That could
be because we are working with the scalar matrix instead of the
block matrix.

Also, the upper/lower triangular story is not completely clear.
Both of these issues will be benchmarked and tackled in the
near future.

Also included in this change is a cleanup of the
SparseNormalCholeskySolver and SparseSchurComplementSolver
classes around the defines used to conditionally compile out
parts of the code.
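
The new pattern is roughly the following (a sketch only; SomeSolver,
SolveUsingFoo and CERES_NO_FOO are placeholder names): each
backend-specific method is always compiled, and when the backend is
unavailable it reports the problem through the returned summary
instead of being #ifdef'd out and stubbed with LOG(FATAL).

  LinearSolver::Summary SomeSolver::SolveUsingFoo(
      double* rhs_and_solution) {
  #ifdef CERES_NO_FOO
    // Stub path: report the missing dependency via the summary.
    LinearSolver::Summary summary;
    summary.num_iterations = 0;
    summary.termination_type = LINEAR_SOLVER_FATAL_ERROR;
    summary.message = "Ceres was not built with Foo support.";
    return summary;
  #else
    LinearSolver::Summary summary;
    summary.termination_type = LINEAR_SOLVER_SUCCESS;
    summary.message = "Success.";
    // ... factorize and solve, updating summary on failure ...
    return summary;
  #endif
  }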

The system_test has also been updated to test EIGEN_SPARSE.

Change-Id: I46a57e9c4c97782696879e0b15cfc7a93fe5496a
diff --git a/internal/ceres/cxsparse.h b/internal/ceres/cxsparse.h
index 1fed82f..5868401 100644
--- a/internal/ceres/cxsparse.h
+++ b/internal/ceres/cxsparse.h
@@ -38,7 +38,6 @@
 
 #include <vector>
 #include "cs.h"
-#include "ceres/internal/port.h"
 
 namespace ceres {
 namespace internal {
@@ -130,9 +129,13 @@
 
 #else  // CERES_NO_CXSPARSE
 
-class CXSparse {};
 typedef void cs_dis;
 
+class CXSparse {
+ public:
+  void Free(void*) {}
+
+};
 #endif  // CERES_NO_CXSPARSE
 
 #endif  // CERES_INTERNAL_CXSPARSE_H_
diff --git a/internal/ceres/reorder_program.cc b/internal/ceres/reorder_program.cc
index fa9e1d8..162bfb8 100644
--- a/internal/ceres/reorder_program.cc
+++ b/internal/ceres/reorder_program.cc
@@ -114,22 +114,22 @@
   LOG(FATAL) << "Congratulations, you found a Ceres bug! "
              << "Please report this error to the developers.";
 #else  // CERES_NO_CXSPARSE
-    // CXSparse works with J'J instead of J'. So compute the block
-    // sparsity for J'J and compute an approximate minimum degree
-    // ordering.
-    CXSparse cxsparse;
-    cs_di* block_jacobian_transpose;
-    block_jacobian_transpose =
-        cxsparse.CreateSparseMatrix(
+  // CXSparse works with J'J instead of J'. So compute the block
+  // sparsity for J'J and compute an approximate minimum degree
+  // ordering.
+  CXSparse cxsparse;
+  cs_di* block_jacobian_transpose;
+  block_jacobian_transpose =
+      cxsparse.CreateSparseMatrix(
             const_cast<TripletSparseMatrix*>(&tsm_block_jacobian_transpose));
-    cs_di* block_jacobian = cxsparse.TransposeMatrix(block_jacobian_transpose);
-    cs_di* block_hessian =
-        cxsparse.MatrixMatrixMultiply(block_jacobian_transpose, block_jacobian);
-    cxsparse.Free(block_jacobian);
-    cxsparse.Free(block_jacobian_transpose);
+  cs_di* block_jacobian = cxsparse.TransposeMatrix(block_jacobian_transpose);
+  cs_di* block_hessian =
+      cxsparse.MatrixMatrixMultiply(block_jacobian_transpose, block_jacobian);
+  cxsparse.Free(block_jacobian);
+  cxsparse.Free(block_jacobian_transpose);
 
-    cxsparse.ApproximateMinimumDegreeOrdering(block_hessian, ordering);
-    cxsparse.Free(block_hessian);
+  cxsparse.ApproximateMinimumDegreeOrdering(block_hessian, ordering);
+  cxsparse.Free(block_hessian);
 #endif  // CERES_NO_CXSPARSE
 }
 
@@ -378,11 +378,26 @@
     string* error) {
 
   if (sparse_linear_algebra_library_type != SUITE_SPARSE &&
-      sparse_linear_algebra_library_type != CX_SPARSE) {
+      sparse_linear_algebra_library_type != CX_SPARSE &&
+      sparse_linear_algebra_library_type != EIGEN_SPARSE) {
     *error = "Unknown sparse linear algebra library.";
     return false;
   }
 
+  // For Eigen, there is nothing to do. This is because Eigen in its
+  // current stable version does not expose a method for doing
+  // symbolic analysis on pre-ordered matrices, so a block
+  // pre-ordering is a bit pointless.
+  //
+  // The dev version as recently as July 20, 2014 has support for
+  // pre-ordering. Once this becomes more widespread, or we add
+  // support for detecting Eigen versions, we can add support for this
+  // along the lines of CXSparse.
+  if (sparse_linear_algebra_library_type == EIGEN_SPARSE) {
+    program->SetParameterOffsetsAndIndex();
+    return true;
+  }
+
   // Set the offsets and index for CreateJacobianSparsityTranspose.
   program->SetParameterOffsetsAndIndex();
   // Compute a block sparse presentation of J'.
diff --git a/internal/ceres/schur_complement_solver.cc b/internal/ceres/schur_complement_solver.cc
index d3eef5d..f295d60 100644
--- a/internal/ceres/schur_complement_solver.cc
+++ b/internal/ceres/schur_complement_solver.cc
@@ -1,5 +1,5 @@
 // Ceres Solver - A fast non-linear least squares minimizer
-// Copyright 2010, 2011, 2012 Google Inc. All rights reserved.
+// Copyright 2014 Google Inc. All rights reserved.
 // http://code.google.com/p/ceres-solver/
 //
 // Redistribution and use in source and binary forms, with or without
@@ -28,12 +28,13 @@
 //
 // Author: sameeragarwal@google.com (Sameer Agarwal)
 
+#include "ceres/internal/port.h"
+
 #include <algorithm>
 #include <ctime>
 #include <set>
 #include <vector>
 
-#include "Eigen/Dense"
 #include "ceres/block_random_access_dense_matrix.h"
 #include "ceres/block_random_access_matrix.h"
 #include "ceres/block_random_access_sparse_matrix.h"
@@ -42,7 +43,6 @@
 #include "ceres/cxsparse.h"
 #include "ceres/detect_structure.h"
 #include "ceres/internal/eigen.h"
-#include "ceres/internal/port.h"
 #include "ceres/internal/scoped_ptr.h"
 #include "ceres/lapack.h"
 #include "ceres/linear_solver.h"
@@ -51,6 +51,8 @@
 #include "ceres/triplet_sparse_matrix.h"
 #include "ceres/types.h"
 #include "ceres/wall_time.h"
+#include "Eigen/Dense"
+#include "Eigen/SparseCore"
 
 namespace ceres {
 namespace internal {
@@ -138,7 +140,8 @@
         .llt();
     if (llt.info() != Eigen::Success) {
       summary.termination_type = LINEAR_SOLVER_FAILURE;
-      summary.message = "Eigen LLT decomposition failed.";
+      summary.message =
+          "Eigen failure. Unable to perform dense Cholesky factorization.";
       return summary;
     }
 
@@ -155,8 +158,6 @@
   return summary;
 }
 
-#if !defined(CERES_NO_SUITESPARSE) || !defined(CERES_NO_CXSPARE)
-
 SparseSchurComplementSolver::SparseSchurComplementSolver(
     const LinearSolver::Options& options)
     : SchurComplementSolver(options),
@@ -165,19 +166,15 @@
 }
 
 SparseSchurComplementSolver::~SparseSchurComplementSolver() {
-#ifndef CERES_NO_SUITESPARSE
   if (factor_ != NULL) {
     ss_.Free(factor_);
     factor_ = NULL;
   }
-#endif  // CERES_NO_SUITESPARSE
 
-#ifndef CERES_NO_CXSPARSE
   if (cxsparse_factor_ != NULL) {
     cxsparse_.Free(cxsparse_factor_);
     cxsparse_factor_ = NULL;
   }
-#endif  // CERES_NO_CXSPARSE
 }
 
 // Determine the non-zero blocks in the Schur Complement matrix, and
@@ -258,6 +255,8 @@
       return SolveReducedLinearSystemUsingSuiteSparse(solution);
     case CX_SPARSE:
       return SolveReducedLinearSystemUsingCXSparse(solution);
+    case EIGEN_SPARSE:
+      return SolveReducedLinearSystemUsingEigen(solution);
     default:
       LOG(FATAL) << "Unknown sparse linear algebra library : "
                  << options().sparse_linear_algebra_library_type;
@@ -266,13 +265,23 @@
   return LinearSolver::Summary();
 }
 
-#ifndef CERES_NO_SUITESPARSE
 // Solve the system Sx = r, assuming that the matrix S is stored in a
 // BlockRandomAccessSparseMatrix.  The linear system is solved using
 // CHOLMOD's sparse cholesky factorization routines.
 LinearSolver::Summary
 SparseSchurComplementSolver::SolveReducedLinearSystemUsingSuiteSparse(
     double* solution) {
+#ifdef CERES_NO_SUITESPARSE
+
+  LinearSolver::Summary summary;
+  summary.num_iterations = 0;
+  summary.termination_type = LINEAR_SOLVER_FATAL_ERROR;
+  summary.message = "Ceres was not built with SuiteSparse support."
+      "Therefore, SPARSE_SCHUR cannot be used with SUITE_SPARSE";
+  return summary;
+
+#else
+
   LinearSolver::Summary summary;
   summary.num_iterations = 0;
   summary.termination_type = LINEAR_SOLVER_SUCCESS;
@@ -326,6 +335,8 @@
   if (factor_ == NULL) {
     ss_.Free(cholmod_lhs);
     summary.termination_type = LINEAR_SOLVER_FATAL_ERROR;
+    // No need to set message as it has already been set by the
+    // symbolic analysis routines above.
     return summary;
   }
 
@@ -335,6 +346,8 @@
   ss_.Free(cholmod_lhs);
 
   if (summary.termination_type != LINEAR_SOLVER_SUCCESS) {
+    // No need to set message as it has already been set by the
+    // numeric factorization routine above.
     return summary;
   }
 
@@ -346,6 +359,8 @@
   ss_.Free(cholmod_rhs);
 
   if (cholmod_solution == NULL) {
+    summary.message =
+        "SuiteSparse failure. Unable to perform triangular solve.";
     summary.termination_type = LINEAR_SOLVER_FAILURE;
     return summary;
   }
@@ -354,23 +369,26 @@
       = VectorRef(static_cast<double*>(cholmod_solution->x), num_rows);
   ss_.Free(cholmod_solution);
   return summary;
-}
-#else
-LinearSolver::Summary
-SparseSchurComplementSolver::SolveReducedLinearSystemUsingSuiteSparse(
-    double* solution) {
-  LOG(FATAL) << "No SuiteSparse support in Ceres.";
-  return LinearSolver::Summary();
-}
 #endif  // CERES_NO_SUITESPARSE
+}
 
-#ifndef CERES_NO_CXSPARSE
 // Solve the system Sx = r, assuming that the matrix S is stored in a
 // BlockRandomAccessSparseMatrix.  The linear system is solved using
 // CXSparse's sparse cholesky factorization routines.
 LinearSolver::Summary
 SparseSchurComplementSolver::SolveReducedLinearSystemUsingCXSparse(
     double* solution) {
+#ifdef CERES_NO_CXSPARSE
+
+  LinearSolver::Summary summary;
+  summary.num_iterations = 0;
+  summary.termination_type = LINEAR_SOLVER_FATAL_ERROR;
+  summary.message = "Ceres was not built with CXSparse support."
+      "Therefore, SPARSE_SCHUR cannot be used with CX_SPARSE";
+  return summary;
+
+#else
+
   LinearSolver::Summary summary;
   summary.num_iterations = 0;
   summary.termination_type = LINEAR_SOLVER_SUCCESS;
@@ -407,16 +425,96 @@
 
   cxsparse_.Free(lhs);
   return summary;
-}
-#else
-LinearSolver::Summary
-SparseSchurComplementSolver::SolveReducedLinearSystemUsingCXSparse(
-    double* solution) {
-  LOG(FATAL) << "No CXSparse support in Ceres.";
-  return LinearSolver::Summary();
-}
 #endif  // CERES_NO_CXPARSE
+}
 
-#endif  // !defined(CERES_NO_SUITESPARSE) || !defined(CERES_NO_CXSPARE)
+// Solve the system Sx = r, assuming that the matrix S is stored in a
+// BlockRandomAccessSparseMatrix.  The linear system is solved using
+// Eigen's sparse cholesky factorization routines.
+LinearSolver::Summary
+SparseSchurComplementSolver::SolveReducedLinearSystemUsingEigen(
+    double* solution) {
+#ifndef CERES_USE_EIGEN_SPARSE
+
+  LinearSolver::Summary summary;
+  summary.num_iterations = 0;
+  summary.termination_type = LINEAR_SOLVER_FATAL_ERROR;
+  summary.message =
+      "SPARSE_SCHUR cannot be used with EIGEN_SPARSE."
+      "Ceres was not built with support for"
+      "Eigen's SimplicialLDLT decomposition."
+      "This requires enabling building with -DEIGENSPARSE=ON.";
+  return summary;
+
+#else
+
+  LinearSolver::Summary summary;
+  summary.num_iterations = 0;
+  summary.termination_type = LINEAR_SOLVER_SUCCESS;
+  summary.message = "Success.";
+
+  // Extract the TripletSparseMatrix that is used for actually storing S.
+  TripletSparseMatrix* tsm =
+      const_cast<TripletSparseMatrix*>(
+          down_cast<const BlockRandomAccessSparseMatrix*>(lhs())->matrix());
+  const int num_rows = tsm->num_rows();
+
+  // The case where there are no f blocks, and the system is block
+  // diagonal.
+  if (num_rows == 0) {
+    return summary;
+  }
+
+  CompressedRowSparseMatrix crsm(*tsm);
+
+  // The crsm above is a symmetric matrix in upper triangular form, in
+  // compressed row form. When we map it to a compressed column matrix
+  // for Eigen, that turns it into a lower triangular matrix.
+  //
+  // TODO(sameeragarwal): What is the best memory layout for sparse
+  // cholesky factorization?
+  Eigen::MappedSparseMatrix<double, Eigen::ColMajor> eigen_lhs(
+      crsm.num_rows(),
+      crsm.num_rows(),
+      crsm.num_nonzeros(),
+      crsm.mutable_rows(),
+      crsm.mutable_cols(),
+      crsm.mutable_values());
+
+  typedef Eigen::SimplicialLDLT<
+    Eigen::SparseMatrix<double, Eigen::ColMajor>,
+    Eigen::Lower> SimplicialLDLT;
+
+  // Compute symbolic factorization if one does not exist.
+  if (simplicial_ldlt_.get() == NULL) {
+    simplicial_ldlt_.reset(new SimplicialLDLT);
+    simplicial_ldlt_->analyzePattern(eigen_lhs.selfadjointView<Eigen::Lower>());
+    if (simplicial_ldlt_->info() != Eigen::Success) {
+      summary.termination_type = LINEAR_SOLVER_FATAL_ERROR;
+      summary.message =
+          "Eigen failure. Unable to find symbolic factorization.";
+      return summary;
+    }
+  }
+
+  simplicial_ldlt_->factorize(eigen_lhs.selfadjointView<Eigen::Lower>());
+  if (simplicial_ldlt_->info() != Eigen::Success) {
+    summary.termination_type = LINEAR_SOLVER_FAILURE;
+    summary.message = "Eigen failure. Unable to find numeric factoriztion.";
+    return summary;
+  }
+
+  VectorRef(solution, num_rows) =
+      simplicial_ldlt_->solve(ConstVectorRef(rhs(), num_rows));
+
+  if (simplicial_ldlt_->info() != Eigen::Success) {
+    summary.termination_type = LINEAR_SOLVER_FAILURE;
+    summary.message = "Eigen failure. Unable to do triangular solve.";
+  }
+
+  return summary;
+#endif  // CERES_USE_EIGEN_SPARSE
+}
+
 }  // namespace internal
 }  // namespace ceres
diff --git a/internal/ceres/schur_complement_solver.h b/internal/ceres/schur_complement_solver.h
index a997851..e911bfb 100644
--- a/internal/ceres/schur_complement_solver.h
+++ b/internal/ceres/schur_complement_solver.h
@@ -35,6 +35,8 @@
 #include <utility>
 #include <vector>
 
+#include "ceres/internal/port.h"
+
 #include "ceres/block_random_access_matrix.h"
 #include "ceres/block_sparse_matrix.h"
 #include "ceres/block_structure.h"
@@ -45,6 +47,10 @@
 #include "ceres/internal/scoped_ptr.h"
 #include "ceres/types.h"
 
+#ifdef CERES_USE_EIGEN_SPARSE
+#include "Eigen/SparseCholesky"
+#endif
+
 namespace ceres {
 namespace internal {
 
@@ -153,7 +159,6 @@
   CERES_DISALLOW_COPY_AND_ASSIGN(DenseSchurComplementSolver);
 };
 
-#if !defined(CERES_NO_SUITESPARSE) || !defined(CERES_NO_CXSPARE)
 // Sparse Cholesky factorization based solver.
 class SparseSchurComplementSolver : public SchurComplementSolver {
  public:
@@ -168,6 +173,8 @@
       double* solution);
   LinearSolver::Summary SolveReducedLinearSystemUsingCXSparse(
       double* solution);
+  LinearSolver::Summary SolveReducedLinearSystemUsingEigen(
+      double* solution);
 
   // Size of the blocks in the Schur complement.
   vector<int> blocks_;
@@ -180,10 +187,16 @@
   CXSparse cxsparse_;
   // Cached factorization
   cs_dis* cxsparse_factor_;
+
+#ifdef CERES_USE_EIGEN_SPARSE
+  scoped_ptr<Eigen::SimplicialLDLT<
+               Eigen::SparseMatrix<double, Eigen::ColMajor>,
+               Eigen::Lower> > simplicial_ldlt_;
+#endif
+
   CERES_DISALLOW_COPY_AND_ASSIGN(SparseSchurComplementSolver);
 };
 
-#endif  // !defined(CERES_NO_SUITESPARSE) || !defined(CERES_NO_CXSPARE)
 }  // namespace internal
 }  // namespace ceres
 
diff --git a/internal/ceres/schur_complement_solver_test.cc b/internal/ceres/schur_complement_solver_test.cc
index d91c162..8e71b2e 100644
--- a/internal/ceres/schur_complement_solver_test.cc
+++ b/internal/ceres/schur_complement_solver_test.cc
@@ -187,17 +187,31 @@
 
 #ifndef CERES_NO_CXSPARSE
 TEST_F(SchurComplementSolverTest,
-       SparseSchurWithSuiteSparseSmallProblem) {
+       SparseSchurWithCXSparseSmallProblem) {
   ComputeAndCompareSolutions(2, false, SPARSE_SCHUR, EIGEN, CX_SPARSE, true);
   ComputeAndCompareSolutions(2, true, SPARSE_SCHUR, EIGEN, CX_SPARSE, true);
 }
 
 TEST_F(SchurComplementSolverTest,
-       SparseSchurWithSuiteSparseLargeProblem) {
+       SparseSchurWithCXSparseLargeProblem) {
   ComputeAndCompareSolutions(3, false, SPARSE_SCHUR, EIGEN, CX_SPARSE, true);
   ComputeAndCompareSolutions(3, true, SPARSE_SCHUR, EIGEN, CX_SPARSE, true);
 }
 #endif  // CERES_NO_CXSPARSE
 
+#ifdef CERES_USE_EIGEN_SPARSE
+TEST_F(SchurComplementSolverTest,
+       SparseSchurWithEigenSparseSmallProblem) {
+  ComputeAndCompareSolutions(2, false, SPARSE_SCHUR, EIGEN, EIGEN_SPARSE, true);
+  ComputeAndCompareSolutions(2, true, SPARSE_SCHUR, EIGEN, EIGEN_SPARSE, true);
+}
+
+TEST_F(SchurComplementSolverTest,
+       SparseSchurWithEigenSparseLargeProblem) {
+  ComputeAndCompareSolutions(3, false, SPARSE_SCHUR, EIGEN, EIGEN_SPARSE, true);
+  ComputeAndCompareSolutions(3, true, SPARSE_SCHUR, EIGEN, EIGEN_SPARSE, true);
+}
+#endif  // CERES_USE_EIGEN_SPARSE
+
 }  // namespace internal
 }  // namespace ceres
diff --git a/internal/ceres/solver_impl.cc b/internal/ceres/solver_impl.cc
index a4edf08..a1cf4ca 100644
--- a/internal/ceres/solver_impl.cc
+++ b/internal/ceres/solver_impl.cc
@@ -826,14 +826,6 @@
   }
 #endif
 
-#if defined(CERES_NO_SUITESPARSE) && defined(CERES_NO_CXSPARSE)
-  if (options->linear_solver_type == SPARSE_SCHUR) {
-    *error = "Can't use SPARSE_SCHUR because neither SuiteSparse nor"
-        "CXSparse was enabled when Ceres was compiled.";
-    return NULL;
-  }
-#endif
-
   if (options->max_linear_solver_iterations <= 0) {
     *error = "Solver::Options::max_linear_solver_iterations is not positive.";
     return NULL;
diff --git a/internal/ceres/sparse_normal_cholesky_solver.cc b/internal/ceres/sparse_normal_cholesky_solver.cc
index cf5bb23..4277480 100644
--- a/internal/ceres/sparse_normal_cholesky_solver.cc
+++ b/internal/ceres/sparse_normal_cholesky_solver.cc
@@ -31,8 +31,6 @@
 // This include must come before any #ifndef check on Ceres compile options.
 #include "ceres/internal/port.h"
 
-#if !defined(CERES_NO_SUITESPARSE) || !defined(CERES_NO_CXSPARSE)
-
 #include "ceres/sparse_normal_cholesky_solver.h"
 
 #include <algorithm>
@@ -48,6 +46,8 @@
 #include "ceres/triplet_sparse_matrix.h"
 #include "ceres/types.h"
 #include "ceres/wall_time.h"
+#include "Eigen/SparseCore"
+
 
 namespace ceres {
 namespace internal {
@@ -56,23 +56,19 @@
     const LinearSolver::Options& options)
     : factor_(NULL),
       cxsparse_factor_(NULL),
-      options_(options) {
+      options_(options) {
 }
 
 void SparseNormalCholeskySolver::FreeFactorization() {
-#ifndef CERES_NO_SUITESPARSE
   if (factor_ != NULL) {
     ss_.Free(factor_);
     factor_ = NULL;
   }
-#endif  // CERES_NO_SUITESPARSE
 
-#ifndef CERES_NO_CXSPARSE
   if (cxsparse_factor_ != NULL) {
     cxsparse_.Free(cxsparse_factor_);
     cxsparse_factor_ = NULL;
   }
-#endif  // CERES_NO_CXSPARSE
 }
 
 SparseNormalCholeskySolver::~SparseNormalCholeskySolver() {
@@ -111,6 +107,9 @@
     case CX_SPARSE:
       summary = SolveImplUsingCXSparse(A, per_solve_options, x);
       break;
+    case EIGEN_SPARSE:
+      summary = SolveImplUsingEigen(A, per_solve_options, x);
+      break;
     default:
       LOG(FATAL) << "Unknown sparse linear algebra library : "
                  << options_.sparse_linear_algebra_library_type;
@@ -123,11 +122,129 @@
   return summary;
 }
 
-#ifndef CERES_NO_CXSPARSE
+LinearSolver::Summary SparseNormalCholeskySolver::SolveImplUsingEigen(
+    CompressedRowSparseMatrix* A,
+    const LinearSolver::PerSolveOptions& per_solve_options,
+    double * rhs_and_solution) {
+#ifndef CERES_USE_EIGEN_SPARSE
+
+  LinearSolver::Summary summary;
+  summary.num_iterations = 0;
+  summary.termination_type = LINEAR_SOLVER_FATAL_ERROR;
+  summary.message =
+      "SPARSE_NORMAL_CHOLESKY cannot be used with EIGEN_SPARSE "
+      "because Ceres was not built with support for "
+      "Eigen's SimplicialLDLT decomposition. "
+      "This requires building Ceres with -DEIGENSPARSE=ON.";
+  return summary;
+
+#else
+
+  EventLogger event_logger("SparseNormalCholeskySolver::Eigen::Solve");
+
+  LinearSolver::Summary summary;
+  summary.num_iterations = 1;
+  summary.termination_type = LINEAR_SOLVER_SUCCESS;
+  summary.message = "Success.";
+
+  // Compute the normal equations. J'J delta = J'f and solve them
+  // using a sparse Cholesky factorization. Notice that when compared
+  // to SuiteSparse we have to explicitly compute the normal equations
+  // before they can be factorized. CHOLMOD/SuiteSparse on the other
+  // hand can just work off of Jt to compute the Cholesky
+  // factorization of the normal equations.
+  //
+  // TODO(sameeragarwal): See the note in SolveImplUsingCXSparse about
+  // how this may be a bad idea for dynamic sparsity.
+  if (outer_product_.get() == NULL) {
+    outer_product_.reset(
+        CompressedRowSparseMatrix::CreateOuterProductMatrixAndProgram(
+            *A, &pattern_));
+  }
+
+  CompressedRowSparseMatrix::ComputeOuterProduct(
+      *A, pattern_, outer_product_.get());
+
+  // Map to an upper triangular column major matrix.
+  //
+  // outer_product_ is a compressed row sparse matrix and in lower
+  // triangular form, when mapped to a compressed column sparse
+  // matrix, it becomes an upper triangular matrix.
+  //
+  // TODO(sameeragarwal): It is not clear to me if an upper triangular
+  // column major matrix is the way to go here, or if a lower
+  // triangular matrix is better. This will require some testing. If
+  // it turns out that the lower triangular is better, then the logic
+  // used to compute the outer product needs to be updated.
+  Eigen::MappedSparseMatrix<double, Eigen::ColMajor> AtA(
+      outer_product_->num_rows(),
+      outer_product_->num_rows(),
+      outer_product_->num_nonzeros(),
+      outer_product_->mutable_rows(),
+      outer_product_->mutable_cols(),
+      outer_product_->mutable_values());
+
+  const Vector b = VectorRef(rhs_and_solution, outer_product_->num_rows());
+  if (simplicial_ldlt_.get() == NULL || options_.dynamic_sparsity) {
+    typedef Eigen::SimplicialLDLT<Eigen::SparseMatrix<double, Eigen::ColMajor>,
+                                  Eigen::Upper> SimplicialLDLT;
+    simplicial_ldlt_.reset(new SimplicialLDLT);
+    // This is a crappy way to be doing this. But right now Eigen does
+    // not expose a way to do symbolic analysis with a given
+    // permutation pattern, so we cannot use a block analysis of the
+    // Jacobian.
+    simplicial_ldlt_->analyzePattern(AtA.selfadjointView<Eigen::Upper>());
+    if (simplicial_ldlt_->info() != Eigen::Success) {
+      summary.termination_type = LINEAR_SOLVER_FATAL_ERROR;
+      summary.message =
+          "Eigen failure. Unable to find symbolic factorization.";
+      return summary;
+    }
+  }
+  event_logger.AddEvent("Analysis");
+
+  simplicial_ldlt_->factorize(AtA.selfadjointView<Eigen::Upper>());
+  if (simplicial_ldlt_->info() != Eigen::Success) {
+    summary.termination_type = LINEAR_SOLVER_FAILURE;
+    summary.message =
+        "Eigen failure. Unable to find numeric factorization.";
+    return summary;
+  }
+
+  VectorRef(rhs_and_solution, outer_product_->num_rows()) =
+      simplicial_ldlt_->solve(b);
+  if (simplicial_ldlt_->info() != Eigen::Success) {
+    summary.termination_type = LINEAR_SOLVER_FAILURE;
+    summary.message =
+        "Eigen failure. Unable to do triangular solve.";
+    return summary;
+  }
+
+  event_logger.AddEvent("Solve");
+  return summary;
+#endif  // CERES_USE_EIGEN_SPARSE
+}
+
+
+
 LinearSolver::Summary SparseNormalCholeskySolver::SolveImplUsingCXSparse(
     CompressedRowSparseMatrix* A,
     const LinearSolver::PerSolveOptions& per_solve_options,
     double * rhs_and_solution) {
+#ifdef CERES_NO_CXSPARSE
+
+  LinearSolver::Summary summary;
+  summary.num_iterations = 0;
+  summary.termination_type = LINEAR_SOLVER_FATAL_ERROR;
+  summary.message =
+      "SPARSE_NORMAL_CHOLESKY cannot be used with CX_SPARSE"
+      "because Ceres was not built with support for CXSparse."
+      "This requires enabling building with -DCXSPARSE=ON.";
+
+  return summary;
+
+#else
+
   EventLogger event_logger("SparseNormalCholeskySolver::CXSparse::Solve");
 
   LinearSolver::Summary summary;
@@ -137,11 +254,14 @@
 
   // Compute the normal equations. J'J delta = J'f and solve them
   // using a sparse Cholesky factorization. Notice that when compared
-  // to SuiteSparse we have to explicitly compute the transpose of Jt,
-  // and then the normal equations before they can be
-  // factorized. CHOLMOD/SuiteSparse on the other hand can just work
-  // off of Jt to compute the Cholesky factorization of the normal
-  // equations.
+  // to SuiteSparse we have to explicitly compute the normal equations
+  // before they can be factorized. CHOLMOD/SuiteSparse on the other
+  // hand can just work off of Jt to compute the Cholesky
+  // factorization of the normal equations.
+  //
+  // TODO(sameeragarwal): If dynamic sparsity is enabled, then this is
+  // not a good idea performance-wise, since the Jacobian has far too
+  // many entries and memory usage will blow up.
   if (outer_product_.get() == NULL) {
     outer_product_.reset(
         CompressedRowSparseMatrix::CreateOuterProductMatrixAndProgram(
@@ -181,28 +301,31 @@
         "CXSparse failure. Unable to find symbolic factorization.";
   } else if (!cxsparse_.SolveCholesky(AtA, cxsparse_factor_, rhs_and_solution)) {
     summary.termination_type = LINEAR_SOLVER_FAILURE;
+    summary.message = "CXSparse::SolveCholesky failed.";
   }
   event_logger.AddEvent("Solve");
 
   return summary;
-}
-#else
-LinearSolver::Summary SparseNormalCholeskySolver::SolveImplUsingCXSparse(
-    CompressedRowSparseMatrix* A,
-    const LinearSolver::PerSolveOptions& per_solve_options,
-    double * rhs_and_solution) {
-  LOG(FATAL) << "No CXSparse support in Ceres.";
-
-  // Unreachable but MSVC does not know this.
-  return LinearSolver::Summary();
-}
 #endif
+}
 
-#ifndef CERES_NO_SUITESPARSE
 LinearSolver::Summary SparseNormalCholeskySolver::SolveImplUsingSuiteSparse(
     CompressedRowSparseMatrix* A,
     const LinearSolver::PerSolveOptions& per_solve_options,
     double * rhs_and_solution) {
+#ifdef CERES_NO_SUITESPARSE
+
+  LinearSolver::Summary summary;
+  summary.num_iterations = 0;
+  summary.termination_type = LINEAR_SOLVER_FATAL_ERROR;
+  summary.message =
+      "SPARSE_NORMAL_CHOLESKY cannot be used with SUITE_SPARSE"
+      "because Ceres was not built with support for SuiteSparse."
+      "This requires enabling building with -DSUITESPARSE=ON.";
+  return summary;
+
+#else
+
   EventLogger event_logger("SparseNormalCholeskySolver::SuiteSparse::Solve");
   LinearSolver::Summary summary;
   summary.termination_type = LINEAR_SOLVER_SUCCESS;
@@ -234,6 +357,8 @@
 
   if (factor_ == NULL) {
     summary.termination_type = LINEAR_SOLVER_FATAL_ERROR;
+    // No need to set message as it has already been set by the
+    // symbolic analysis routines above.
     return summary;
   }
 
@@ -251,25 +376,15 @@
     memcpy(rhs_and_solution, solution->x, num_cols * sizeof(*rhs_and_solution));
     ss_.Free(solution);
   } else {
+    // No need to set message as it has already been set by the
+    // numeric factorization routine above.
     summary.termination_type = LINEAR_SOLVER_FAILURE;
   }
 
   event_logger.AddEvent("Teardown");
   return summary;
-}
-#else
-LinearSolver::Summary SparseNormalCholeskySolver::SolveImplUsingSuiteSparse(
-    CompressedRowSparseMatrix* A,
-    const LinearSolver::PerSolveOptions& per_solve_options,
-    double * rhs_and_solution) {
-  LOG(FATAL) << "No SuiteSparse support in Ceres.";
-
-  // Unreachable but MSVC does not know this.
-  return LinearSolver::Summary();
-}
 #endif
+}
 
 }   // namespace internal
 }   // namespace ceres
-
-#endif  // !defined(CERES_NO_SUITESPARSE) || !defined(CERES_NO_CXSPARSE)
diff --git a/internal/ceres/sparse_normal_cholesky_solver.h b/internal/ceres/sparse_normal_cholesky_solver.h
index ed777a1..0e6a988 100644
--- a/internal/ceres/sparse_normal_cholesky_solver.h
+++ b/internal/ceres/sparse_normal_cholesky_solver.h
@@ -37,12 +37,14 @@
 // This include must come before any #ifndef check on Ceres compile options.
 #include "ceres/internal/port.h"
 
-#if !defined(CERES_NO_SUITESPARSE) || !defined(CERES_NO_CXSPARSE)
-
-#include "ceres/cxsparse.h"
 #include "ceres/internal/macros.h"
 #include "ceres/linear_solver.h"
 #include "ceres/suitesparse.h"
+#include "ceres/cxsparse.h"
+
+#ifdef CERES_USE_EIGEN_SPARSE
+#include "Eigen/SparseCholesky"
+#endif
 
 namespace ceres {
 namespace internal {
@@ -74,6 +76,12 @@
       const LinearSolver::PerSolveOptions& options,
       double* rhs_and_solution);
 
+  // Returns a fatal error if CERES_USE_EIGEN_SPARSE is not defined.
+  LinearSolver::Summary SolveImplUsingEigen(
+      CompressedRowSparseMatrix* A,
+      const LinearSolver::PerSolveOptions& options,
+      double* rhs_and_solution);
+
   void FreeFactorization();
 
   SuiteSparse ss_;
@@ -83,6 +91,13 @@
   CXSparse cxsparse_;
   // Cached factorization
   cs_dis* cxsparse_factor_;
+
+#ifdef CERES_USE_EIGEN_SPARSE
+  scoped_ptr<Eigen::SimplicialLDLT<
+               Eigen::SparseMatrix<double, Eigen::ColMajor>,
+               Eigen::Upper> > simplicial_ldlt_;
+#endif
+
   scoped_ptr<CompressedRowSparseMatrix> outer_product_;
   vector<int> pattern_;
   const LinearSolver::Options options_;
@@ -92,5 +107,4 @@
 }  // namespace internal
 }  // namespace ceres
 
-#endif  // !defined(CERES_NO_SUITESPARSE) || !defined(CERES_NO_CXSPARSE)
 #endif  // CERES_INTERNAL_SPARSE_NORMAL_CHOLESKY_SOLVER_H_
diff --git a/internal/ceres/suitesparse.h b/internal/ceres/suitesparse.h
index f27e338..baab899 100644
--- a/internal/ceres/suitesparse.h
+++ b/internal/ceres/suitesparse.h
@@ -286,6 +286,7 @@
 typedef void cholmod_factor;
 
 class SuiteSparse {
+ public:
   // Defining this static function even when SuiteSparse is not
   // available, allows client code to check for the presence of CAMD
   // without checking for the absence of the CERES_NO_CAMD symbol.
@@ -296,6 +297,8 @@
   static bool IsConstrainedApproximateMinimumDegreeOrderingAvailable() {
     return false;
   }
+
+  void Free(void*) {}
 };
 
 #endif  // CERES_NO_SUITESPARSE
diff --git a/internal/ceres/system_test.cc b/internal/ceres/system_test.cc
index f4b9ff3..f70b79a 100644
--- a/internal/ceres/system_test.cc
+++ b/internal/ceres/system_test.cc
@@ -43,6 +43,8 @@
 #include <cstdlib>
 #include <string>
 
+#include "ceres/internal/port.h"
+
 #include "ceres/autodiff_cost_function.h"
 #include "ceres/ordered_groups.h"
 #include "ceres/problem.h"
@@ -493,40 +495,45 @@
                                  ordering,                              \
                                  preconditioner))
 
+  CONFIGURE(DENSE_SCHUR,            SUITE_SPARSE, kAutomaticOrdering, IDENTITY);
+  CONFIGURE(DENSE_SCHUR,            SUITE_SPARSE, kUserOrdering,      IDENTITY);
+
+  CONFIGURE(CGNR,                   SUITE_SPARSE, kAutomaticOrdering, JACOBI);
+
+  CONFIGURE(ITERATIVE_SCHUR,        SUITE_SPARSE, kUserOrdering,      JACOBI);
+  CONFIGURE(ITERATIVE_SCHUR,        SUITE_SPARSE, kAutomaticOrdering, JACOBI);
+
+  CONFIGURE(ITERATIVE_SCHUR,        SUITE_SPARSE, kUserOrdering,      SCHUR_JACOBI);
+  CONFIGURE(ITERATIVE_SCHUR,        SUITE_SPARSE, kAutomaticOrdering, SCHUR_JACOBI);
+
 #ifndef CERES_NO_SUITESPARSE
   CONFIGURE(SPARSE_NORMAL_CHOLESKY, SUITE_SPARSE, kAutomaticOrdering, IDENTITY);
   CONFIGURE(SPARSE_NORMAL_CHOLESKY, SUITE_SPARSE, kUserOrdering,      IDENTITY);
 
   CONFIGURE(SPARSE_SCHUR,           SUITE_SPARSE, kAutomaticOrdering, IDENTITY);
   CONFIGURE(SPARSE_SCHUR,           SUITE_SPARSE, kUserOrdering,      IDENTITY);
+
+  CONFIGURE(ITERATIVE_SCHUR,        SUITE_SPARSE, kAutomaticOrdering, CLUSTER_JACOBI);
+  CONFIGURE(ITERATIVE_SCHUR,        SUITE_SPARSE, kUserOrdering,      CLUSTER_JACOBI);
+
+  CONFIGURE(ITERATIVE_SCHUR,        SUITE_SPARSE, kAutomaticOrdering, CLUSTER_TRIDIAGONAL);
+  CONFIGURE(ITERATIVE_SCHUR,        SUITE_SPARSE, kUserOrdering,      CLUSTER_TRIDIAGONAL);
 #endif  // CERES_NO_SUITESPARSE
 
 #ifndef CERES_NO_CXSPARSE
+  CONFIGURE(SPARSE_NORMAL_CHOLESKY, CX_SPARSE, kAutomaticOrdering, IDENTITY);
+  CONFIGURE(SPARSE_NORMAL_CHOLESKY, CX_SPARSE, kUserOrdering,      IDENTITY);
+
   CONFIGURE(SPARSE_SCHUR,           CX_SPARSE,    kAutomaticOrdering, IDENTITY);
   CONFIGURE(SPARSE_SCHUR,           CX_SPARSE,    kUserOrdering,      IDENTITY);
 #endif  // CERES_NO_CXSPARSE
 
-  CONFIGURE(DENSE_SCHUR,            SUITE_SPARSE, kAutomaticOrdering, IDENTITY);
-  CONFIGURE(DENSE_SCHUR,            SUITE_SPARSE, kUserOrdering,      IDENTITY);
-
-  CONFIGURE(CGNR,                   SUITE_SPARSE, kAutomaticOrdering, JACOBI);
-  CONFIGURE(ITERATIVE_SCHUR,        SUITE_SPARSE, kUserOrdering,      JACOBI);
-  CONFIGURE(ITERATIVE_SCHUR,        SUITE_SPARSE, kUserOrdering,      SCHUR_JACOBI);
-
-#ifndef CERES_NO_SUITESPARSE
-
-  CONFIGURE(ITERATIVE_SCHUR,        SUITE_SPARSE, kUserOrdering,      CLUSTER_JACOBI);
-  CONFIGURE(ITERATIVE_SCHUR,        SUITE_SPARSE, kUserOrdering,      CLUSTER_TRIDIAGONAL);
-#endif  // CERES_NO_SUITESPARSE
-
-  CONFIGURE(ITERATIVE_SCHUR,        SUITE_SPARSE, kAutomaticOrdering, JACOBI);
-  CONFIGURE(ITERATIVE_SCHUR,        SUITE_SPARSE, kAutomaticOrdering, SCHUR_JACOBI);
-
-#ifndef CERES_NO_SUITESPARSE
-
-  CONFIGURE(ITERATIVE_SCHUR,        SUITE_SPARSE, kAutomaticOrdering, CLUSTER_JACOBI);
-  CONFIGURE(ITERATIVE_SCHUR,        SUITE_SPARSE, kAutomaticOrdering, CLUSTER_TRIDIAGONAL);
-#endif  // CERES_NO_SUITESPARSE
+#ifdef CERES_USE_EIGEN_SPARSE
+  CONFIGURE(SPARSE_SCHUR,           EIGEN_SPARSE,    kAutomaticOrdering, IDENTITY);
+  CONFIGURE(SPARSE_SCHUR,           EIGEN_SPARSE,    kUserOrdering,      IDENTITY);
+  CONFIGURE(SPARSE_NORMAL_CHOLESKY, EIGEN_SPARSE,    kAutomaticOrdering, IDENTITY);
+  CONFIGURE(SPARSE_NORMAL_CHOLESKY, EIGEN_SPARSE,    kUserOrdering,      IDENTITY);
+#endif  // CERES_USE_EIGEN_SPARSE
 
 #undef CONFIGURE
 
diff --git a/internal/ceres/types.cc b/internal/ceres/types.cc
index adf1090..4710261 100644
--- a/internal/ceres/types.cc
+++ b/internal/ceres/types.cc
@@ -96,6 +96,7 @@
   switch (type) {
     CASESTR(SUITE_SPARSE);
     CASESTR(CX_SPARSE);
+    CASESTR(EIGEN_SPARSE);
     default:
       return "UNKNOWN";
   }
@@ -107,6 +108,7 @@
   UpperCase(&value);
   STRENUM(SUITE_SPARSE);
   STRENUM(CX_SPARSE);
+  STRENUM(EIGEN_SPARSE);
   return false;
 }
 
diff --git a/internal/ceres/unsymmetric_linear_solver_test.cc b/internal/ceres/unsymmetric_linear_solver_test.cc
index 412e611..0b82e6a 100644
--- a/internal/ceres/unsymmetric_linear_solver_test.cc
+++ b/internal/ceres/unsymmetric_linear_solver_test.cc
@@ -107,13 +107,19 @@
               LINEAR_SOLVER_SUCCESS);
 
     for (int i = 0; i < A_->num_cols(); ++i) {
-      EXPECT_NEAR(sol_unregularized_[i], x_unregularized[i], 1e-8);
+      EXPECT_NEAR(sol_unregularized_[i], x_unregularized[i], 1e-8)
+          << "\nExpected: "
+          << ConstVectorRef(sol_unregularized_.get(), A_->num_cols()).transpose()
+          << "\nActual: " << x_unregularized.transpose();
     }
 
     EXPECT_EQ(regularized_solve_summary.termination_type,
               LINEAR_SOLVER_SUCCESS);
     for (int i = 0; i < A_->num_cols(); ++i) {
-      EXPECT_NEAR(sol_regularized_[i], x_regularized[i], 1e-8);
+      EXPECT_NEAR(sol_regularized_[i], x_regularized[i], 1e-8)
+          << "\nExpected: "
+          << ConstVectorRef(sol_regularized_.get(), A_->num_cols()).transpose()
+          << "\nActual: " << x_regularized.transpose();
     }
   }
 
@@ -212,5 +218,35 @@
 }
 #endif
 
+#ifdef CERES_USE_EIGEN_SPARSE
+TEST_F(UnsymmetricLinearSolverTest,
+       SparseNormalCholeskyUsingEigenPreOrdering) {
+  LinearSolver::Options options;
+  options.sparse_linear_algebra_library_type = EIGEN_SPARSE;
+  options.type = SPARSE_NORMAL_CHOLESKY;
+  options.use_postordering = false;
+  TestSolver(options);
+}
+
+TEST_F(UnsymmetricLinearSolverTest,
+       SparseNormalCholeskyUsingEigenPostOrdering) {
+  LinearSolver::Options options;
+  options.sparse_linear_algebra_library_type = EIGEN_SPARSE;
+  options.type = SPARSE_NORMAL_CHOLESKY;
+  options.use_postordering = true;
+  TestSolver(options);
+}
+
+TEST_F(UnsymmetricLinearSolverTest,
+       SparseNormalCholeskyUsingEigenDynamicSparsity) {
+  LinearSolver::Options options;
+  options.sparse_linear_algebra_library_type = EIGEN_SPARSE;
+  options.type = SPARSE_NORMAL_CHOLESKY;
+  options.dynamic_sparsity = true;
+  TestSolver(options);
+}
+
+#endif  // CERES_USE_EIGEN_SPARSE
+
 }  // namespace internal
 }  // namespace ceres