More pre-ordering support.

1. CX_SPARSE now supports pre-ordering of the Jacobian.
2. Add support for constrained approximate minimum degree ordering
   for SuiteSparse versions >= 4.2.0.
3. Using 2, add support for pre-ordering for SPARSE_SCHUR when used
   with SUITE_SPARSE (a usage sketch follows this list).
4. Using 2, add support for user orderings in SPARSE_NORMAL_CHOLESKY.
5. Minor documentation and code cleanups all around.
6. Test updates and refactoring.
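
A minimal usage sketch (illustrative only, not part of this patch),
using the Solver::Options fields touched by this change:

  ceres::Solver::Options options;
  options.linear_solver_type = ceres::SPARSE_SCHUR;
  options.sparse_linear_algebra_library = ceres::SUITE_SPARSE;
  // false: Ceres pre-orders the parameter blocks (constrained AMD
  // when SuiteSparse >= 4.2.0 is available) and the factorization
  // uses the natural ordering. true: the sparse library computes
  // its own fill-reducing ordering during symbolic analysis.
  options.use_postordering = false;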

Change-Id: Ibfe3ac95d59d54ab14d1d60a07f767688070f29f
diff --git a/internal/ceres/cxsparse.cc b/internal/ceres/cxsparse.cc
index b7f2520..3279697 100644
--- a/internal/ceres/cxsparse.cc
+++ b/internal/ceres/cxsparse.cc
@@ -93,6 +93,11 @@
   return cs_schol(1, A);
 }
 
+cs_dis* CXSparse::AnalyzeCholeskyWithNaturalOrdering(cs_di* A) {
+  // order = 0 for Natural ordering.
+  return cs_schol(0, A);
+}
+
 cs_dis* CXSparse::BlockAnalyzeCholesky(cs_di* A,
                                        const vector<int>& row_blocks,
                                        const vector<int>& col_blocks) {
@@ -173,6 +178,20 @@
   return cs_compress(&tsm_wrapper);
 }
 
+void CXSparse::ApproximateMinimumDegreeOrdering(cs_di* A, int* ordering) {
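+  // order = 1 requests a fill-reducing AMD ordering suitable for Cholesky.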
+  int* cs_ordering = cs_amd(1, A);
+  copy(cs_ordering, cs_ordering + A->m, ordering);
+  cs_free(cs_ordering);
+}
+
+cs_di* CXSparse::TransposeMatrix(cs_di* A) {
+  return cs_di_transpose(A, 1);
+}
+
+cs_di* CXSparse::MatrixMatrixMultiply(cs_di* A, cs_di* B) {
+  return cs_di_multiply(A, B);
+}
+
 void CXSparse::Free(cs_di* sparse_matrix) {
   cs_di_spfree(sparse_matrix);
 }
diff --git a/internal/ceres/cxsparse.h b/internal/ceres/cxsparse.h
index d34b635..6004301 100644
--- a/internal/ceres/cxsparse.h
+++ b/internal/ceres/cxsparse.h
@@ -70,14 +70,49 @@
   // with Free. May return NULL if the compression or allocation fails.
   cs_di* CreateSparseMatrix(TripletSparseMatrix* A);
 
+  // B = A'
+  //
+  // The returned matrix should be deallocated with Free when not used
+  // anymore.
+  cs_di* TransposeMatrix(cs_di* A);
+
+  // C = A * B
+  //
+  // The returned matrix should be deallocated with Free when not used
+  // anymore.
+  cs_di* MatrixMatrixMultiply(cs_di* A, cs_di* B);
+
   // Computes a symbolic factorization of A that can be used in SolveCholesky.
+  //
   // The returned matrix should be deallocated with Free when not used anymore.
   cs_dis* AnalyzeCholesky(cs_di* A);
 
+  // Computes a symbolic factorization of A that can be used in
+  // SolveCholesky, but does not compute a fill-reducing ordering.
+  //
+  // The returned matrix should be deallocated with Free when not used anymore.
+  cs_dis* AnalyzeCholeskyWithNaturalOrdering(cs_di* A);
+
+  // Computes a symbolic factorization of A that can be used in
+  // SolveCholesky. The difference from AnalyzeCholesky is that this
+  // function first detects the block sparsity of the matrix using
+  // information about the row and column blocks and uses this block
+  // sparse matrix to find a fill-reducing ordering. This ordering is
+  // then used to find a symbolic factorization. This can result in a
+  // significant performance improvement over AnalyzeCholesky on block
+  // sparse matrices.
+  //
+  // The returned matrix should be deallocated with Free when not used
+  // anymore.
   cs_dis* BlockAnalyzeCholesky(cs_di* A,
                                const vector<int>& row_blocks,
                                const vector<int>& col_blocks);
 
+  // Compute a fill-reducing approximate minimum degree ordering of
+  // the matrix A. ordering should be non-NULL and should point to
+  // enough memory to hold the ordering for the rows of A.
+  void ApproximateMinimumDegreeOrdering(cs_di* A, int* ordering);
+
   void Free(cs_di* sparse_matrix);
   void Free(cs_dis* symbolic_factorization);
 
diff --git a/internal/ceres/schur_complement_solver.cc b/internal/ceres/schur_complement_solver.cc
index 0defcd6..09f61d7 100644
--- a/internal/ceres/schur_complement_solver.cc
+++ b/internal/ceres/schur_complement_solver.cc
@@ -276,26 +276,42 @@
     return true;
   }
 
-  cholmod_sparse* cholmod_lhs = ss_.CreateSparseMatrix(tsm);
-  // The matrix is symmetric, and the upper triangular part of the
-  // matrix contains the values.
-  cholmod_lhs->stype = 1;
+  cholmod_sparse* cholmod_lhs = NULL;
+  if (options().use_postordering) {
+    // If we are going to do a full symbolic analysis of the Schur
+    // complement matrix from scratch and not rely on the
+    // pre-ordering, then the fastest path in cholmod_factorize is the
+    // one corresponding to upper triangular matrices.
+
+    // Create an upper triangular symmetric matrix.
+    cholmod_lhs = ss_.CreateSparseMatrix(tsm);
+    cholmod_lhs->stype = 1;
+
+    if (factor_ == NULL) {
+      factor_ = ss_.BlockAnalyzeCholesky(cholmod_lhs, blocks_, blocks_);
+    }
+  } else {
+    // If we are going to use the natural ordering (i.e. rely on the
+    // pre-ordering computed by solver_impl.cc), then the fastest
+    // path in cholmod_factorize is the one corresponding to lower
+    // triangular matrices.
+
+    // Create a lower triangular symmetric matrix.
+    cholmod_lhs = ss_.CreateSparseMatrixTranspose(tsm);
+    cholmod_lhs->stype = -1;
+
+    if (factor_ == NULL) {
+      factor_ = ss_.AnalyzeCholeskyWithNaturalOrdering(cholmod_lhs);
+    }
+  }
 
   cholmod_dense*  cholmod_rhs =
       ss_.CreateDenseVector(const_cast<double*>(rhs()), num_rows, num_rows);
-
-  // Symbolic factorization is computed if we don't already have one handy.
-  if (factor_ == NULL) {
-    factor_ = ss_.BlockAnalyzeCholesky(cholmod_lhs, blocks_, blocks_);
-  }
-
   cholmod_dense* cholmod_solution =
       ss_.SolveCholesky(cholmod_lhs, factor_, cholmod_rhs);
 
   ss_.Free(cholmod_lhs);
-  cholmod_lhs = NULL;
   ss_.Free(cholmod_rhs);
-  cholmod_rhs = NULL;
 
   if (cholmod_solution == NULL) {
     LOG(WARNING) << "CHOLMOD solve failed.";
@@ -339,7 +355,8 @@
 
   // Compute symbolic factorization if not available.
   if (cxsparse_factor_ == NULL) {
-    cxsparse_factor_ = CHECK_NOTNULL(cxsparse_.BlockAnalyzeCholesky(lhs, blocks_, blocks_));
+    cxsparse_factor_ =
+        CHECK_NOTNULL(cxsparse_.BlockAnalyzeCholesky(lhs, blocks_, blocks_));
   }
 
   // Solve the linear system.
diff --git a/internal/ceres/schur_complement_solver_test.cc b/internal/ceres/schur_complement_solver_test.cc
index 1820bc9..57fd263 100644
--- a/internal/ceres/schur_complement_solver_test.cc
+++ b/internal/ceres/schur_complement_solver_test.cc
@@ -87,7 +87,8 @@
       int problem_id,
       bool regularization,
       ceres::LinearSolverType linear_solver_type,
-      ceres::SparseLinearAlgebraLibraryType sparse_linear_algebra_library) {
+      ceres::SparseLinearAlgebraLibraryType sparse_linear_algebra_library,
+      bool use_postordering) {
     SetUpFromProblemId(problem_id);
     LinearSolver::Options options;
     options.elimination_groups.push_back(num_eliminate_blocks);
@@ -95,6 +96,7 @@
         A->block_structure()->cols.size() - num_eliminate_blocks);
     options.type = linear_solver_type;
     options.sparse_linear_algebra_library = sparse_linear_algebra_library;
+    options.use_postordering = use_postordering;
 
     scoped_ptr<LinearSolver> solver(LinearSolver::Create(options));
 
@@ -129,32 +131,49 @@
   scoped_array<double> sol_d;
 };
 
+TEST_F(SchurComplementSolverTest, DenseSchurWithSmallProblem) {
+  ComputeAndCompareSolutions(2, false, DENSE_SCHUR, SUITE_SPARSE, true);
+  ComputeAndCompareSolutions(2, true, DENSE_SCHUR, SUITE_SPARSE, true);
+}
+
+TEST_F(SchurComplementSolverTest, DenseSchurWithLargeProblem) {
+  ComputeAndCompareSolutions(3, false, DENSE_SCHUR, SUITE_SPARSE, true);
+  ComputeAndCompareSolutions(3, true, DENSE_SCHUR, SUITE_SPARSE, true);
+}
+
 #ifndef CERES_NO_SUITESPARSE
-TEST_F(SchurComplementSolverTest, SparseSchurWithSuiteSparse) {
-  ComputeAndCompareSolutions(2, false, SPARSE_SCHUR, SUITE_SPARSE);
-  ComputeAndCompareSolutions(3, false, SPARSE_SCHUR, SUITE_SPARSE);
-  ComputeAndCompareSolutions(2, true, SPARSE_SCHUR, SUITE_SPARSE);
-  ComputeAndCompareSolutions(3, true, SPARSE_SCHUR, SUITE_SPARSE);
+TEST_F(SchurComplementSolverTest, SparseSchurWithSuiteSparseSmallProblemNoPostOrdering) {
+  ComputeAndCompareSolutions(2, false, SPARSE_SCHUR, SUITE_SPARSE, false);
+  ComputeAndCompareSolutions(2, true, SPARSE_SCHUR, SUITE_SPARSE, false);
+}
+
+TEST_F(SchurComplementSolverTest, SparseSchurWithSuiteSparseSmallProblemPostOrdering) {
+  ComputeAndCompareSolutions(2, false, SPARSE_SCHUR, SUITE_SPARSE, true);
+  ComputeAndCompareSolutions(2, true, SPARSE_SCHUR, SUITE_SPARSE, true);
+}
+
+TEST_F(SchurComplementSolverTest, SparseSchurWithSuiteSparseLargeProblemNoPostOrdering) {
+  ComputeAndCompareSolutions(3, false, SPARSE_SCHUR, SUITE_SPARSE, false);
+  ComputeAndCompareSolutions(3, true, SPARSE_SCHUR, SUITE_SPARSE, false);
+}
+
+TEST_F(SchurComplementSolverTest, SparseSchurWithSuiteSparseLargeProblemPostOrdering) {
+  ComputeAndCompareSolutions(3, false, SPARSE_SCHUR, SUITE_SPARSE, true);
+  ComputeAndCompareSolutions(3, true, SPARSE_SCHUR, SUITE_SPARSE, true);
 }
 #endif  // CERES_NO_SUITESPARSE
 
 #ifndef CERES_NO_CXSPARSE
-TEST_F(SchurComplementSolverTest, SparseSchurWithCXSparse) {
-  ComputeAndCompareSolutions(2, false, SPARSE_SCHUR, CX_SPARSE);
-  ComputeAndCompareSolutions(3, false, SPARSE_SCHUR, CX_SPARSE);
-  ComputeAndCompareSolutions(2, true, SPARSE_SCHUR, CX_SPARSE);
-  ComputeAndCompareSolutions(3, true, SPARSE_SCHUR, CX_SPARSE);
+TEST_F(SchurComplementSolverTest, SparseSchurWithCXSparseSmallProblem) {
+  ComputeAndCompareSolutions(2, false, SPARSE_SCHUR, CX_SPARSE, true);
+  ComputeAndCompareSolutions(2, true, SPARSE_SCHUR, CX_SPARSE, true);
+}
+
+TEST_F(SchurComplementSolverTest, SparseSchurWithCXSparseLargeProblem) {
+  ComputeAndCompareSolutions(3, false, SPARSE_SCHUR, CX_SPARSE, true);
+  ComputeAndCompareSolutions(3, true, SPARSE_SCHUR, CX_SPARSE, true);
 }
 #endif  // CERES_NO_CXSPARSE
 
-TEST_F(SchurComplementSolverTest, DenseSchur) {
-  // The sparse linear algebra library type is ignored for
-  // DENSE_SCHUR.
-  ComputeAndCompareSolutions(2, false, DENSE_SCHUR, SUITE_SPARSE);
-  ComputeAndCompareSolutions(3, false, DENSE_SCHUR, SUITE_SPARSE);
-  ComputeAndCompareSolutions(2, true, DENSE_SCHUR, SUITE_SPARSE);
-  ComputeAndCompareSolutions(3, true, DENSE_SCHUR, SUITE_SPARSE);
-}
-
 }  // namespace internal
 }  // namespace ceres
diff --git a/internal/ceres/solver_impl.cc b/internal/ceres/solver_impl.cc
index 5779299..52056f7 100644
--- a/internal/ceres/solver_impl.cc
+++ b/internal/ceres/solver_impl.cc
@@ -33,7 +33,9 @@
 #include <cstdio>
 #include <iostream>  // NOLINT
 #include <numeric>
+#include <string>
 #include "ceres/coordinate_descent_minimizer.h"
+#include "ceres/cxsparse.h"
 #include "ceres/evaluator.h"
 #include "ceres/gradient_checking_cost_function.h"
 #include "ceres/iteration_callback.h"
@@ -995,7 +997,9 @@
   }
 
   if (IsSchurType(options->linear_solver_type)) {
-    if (!ReorderProgramForSchurTypeLinearSolver(problem_impl->parameter_map(),
+    if (!ReorderProgramForSchurTypeLinearSolver(options->linear_solver_type,
+                                                options->sparse_linear_algebra_library,
+                                                problem_impl->parameter_map(),
                                                 linear_solver_ordering,
                                                 transformed_program.get(),
                                                 error)) {
@@ -1004,9 +1008,15 @@
     return transformed_program.release();
   }
 
-  if (options->linear_solver_type == SPARSE_NORMAL_CHOLESKY &&
-      options->sparse_linear_algebra_library == SUITE_SPARSE) {
-    ReorderProgramForSparseNormalCholesky(transformed_program.get());
+  if (options->linear_solver_type == SPARSE_NORMAL_CHOLESKY) {
+    if (!ReorderProgramForSparseNormalCholesky(
+            options->sparse_linear_algebra_library,
+            linear_solver_ordering,
+            transformed_program.get(),
+            error)) {
+      return NULL;
+    }
+
     return transformed_program.release();
   }
 
@@ -1093,6 +1103,18 @@
   linear_solver_options.sparse_linear_algebra_library =
       options->sparse_linear_algebra_library;
   linear_solver_options.use_postordering = options->use_postordering;
+
+  // Ignore user's postordering preferences and force it to be true if
+  // cholmod_camd is not available. This ensures that the linear
+  // solver does not assume that a fill-reducing pre-ordering has been
+  // done.
+#if !defined(CERES_NO_SUITESPARSE) && defined(CERES_NO_CAMD)
+  if (IsSchurType(linear_solver_options.type) &&
+      linear_solver_options.sparse_linear_algebra_library == SUITE_SPARSE) {
+    linear_solver_options.use_postordering = true;
+  }
+#endif
+
   linear_solver_options.num_threads = options->num_linear_solver_threads;
   options->num_linear_solver_threads = linear_solver_options.num_threads;
 
@@ -1115,48 +1137,6 @@
   return LinearSolver::Create(linear_solver_options);
 }
 
-bool SolverImpl::ApplyUserOrdering(
-    const ProblemImpl::ParameterMap& parameter_map,
-    const ParameterBlockOrdering* ordering,
-    Program* program,
-    string* error) {
-  if (ordering->NumElements() != program->NumParameterBlocks()) {
-    *error = StringPrintf("User specified ordering does not have the same "
-                          "number of parameters as the problem. The problem"
-                          "has %d blocks while the ordering has %d blocks.",
-                          program->NumParameterBlocks(),
-                          ordering->NumElements());
-    return false;
-  }
-
-  vector<ParameterBlock*>* parameter_blocks =
-      program->mutable_parameter_blocks();
-  parameter_blocks->clear();
-
-  const map<int, set<double*> >& groups =
-      ordering->group_to_elements();
-
-  for (map<int, set<double*> >::const_iterator group_it = groups.begin();
-       group_it != groups.end();
-       ++group_it) {
-    const set<double*>& group = group_it->second;
-    for (set<double*>::const_iterator parameter_block_ptr_it = group.begin();
-         parameter_block_ptr_it != group.end();
-         ++parameter_block_ptr_it) {
-      ProblemImpl::ParameterMap::const_iterator parameter_block_it =
-          parameter_map.find(*parameter_block_ptr_it);
-      if (parameter_block_it == parameter_map.end()) {
-        *error = StringPrintf("User specified ordering contains a pointer "
-                              "to a double that is not a parameter block in "
-                              "the problem. The invalid double is in group: %d",
-                              group_it->first);
-        return false;
-      }
-      parameter_blocks->push_back(parameter_block_it->second);
-    }
-  }
-  return true;
-}
 
 // Find the minimum index of any parameter block to the given residual.
 // Parameter blocks that have indices greater than num_eliminate_blocks are
@@ -1364,64 +1344,51 @@
   LOG(WARNING) << msg;
 }
 
-bool SolverImpl::ReorderProgramForSchurTypeLinearSolver(
+bool SolverImpl::ApplyUserOrdering(
     const ProblemImpl::ParameterMap& parameter_map,
-    ParameterBlockOrdering* ordering,
+    const ParameterBlockOrdering* parameter_block_ordering,
     Program* program,
     string* error) {
-  // At this point one of two things is true.
-  //
-  //  1. The user did not specify an ordering - ordering has one group
-  //  containing all the parameter blocks.
-
-  //  2. The user specified an ordering, and the first group has
-  //  non-zero elements.
-  //
-  // We handle these two cases in turn.
-  if (ordering->NumGroups() == 1) {
-    // If the user supplied an ordering with just one
-    // group, it is equivalent to the user supplying NULL as an
-    // ordering. Ceres is completely free to choose the parameter
-    // block ordering as it sees fit. For Schur type solvers, this
-    // means that the user wishes for Ceres to identify the e_blocks,
-    // which we do by computing a maximal independent set.
-    vector<ParameterBlock*> schur_ordering;
-    const int num_eliminate_blocks = ComputeSchurOrdering(*program,
-                                                          &schur_ordering);
-
-    CHECK_EQ(schur_ordering.size(), program->NumParameterBlocks())
-        << "Congratulations, you found a Ceres bug! Please report this error "
-        << "to the developers.";
-
-    // Update the ordering object.
-    for (int i = 0; i < schur_ordering.size(); ++i) {
-      double* parameter_block = schur_ordering[i]->mutable_user_state();
-      const int group_id = (i < num_eliminate_blocks) ? 0 : 1;
-      ordering->AddElementToGroup(parameter_block, group_id);
-    }
-
-    // Apply the parameter block re-ordering. Technically we could
-    // call ApplyUserOrdering, but this is cheaper and simpler.
-    swap(*program->mutable_parameter_blocks(), schur_ordering);
-  } else {
-    // The user supplied an ordering.
-    if (!ApplyUserOrdering(parameter_map, ordering, program, error)) {
-      return false;
-    }
+  const int num_parameter_blocks = program->NumParameterBlocks();
+  if (parameter_block_ordering->NumElements() != num_parameter_blocks) {
+    *error = StringPrintf("User specified ordering does not have the same "
+                          "number of parameters as the problem. The problem"
+                          "number of parameters as the problem. The problem "
+                          num_parameter_blocks,
+                          parameter_block_ordering->NumElements());
+    return false;
   }
 
-  program->SetParameterOffsetsAndIndex();
+  vector<ParameterBlock*>* parameter_blocks =
+      program->mutable_parameter_blocks();
+  parameter_blocks->clear();
 
-  const int num_eliminate_blocks =
-      ordering->group_to_elements().begin()->second.size();
+  const map<int, set<double*> >& groups =
+      parameter_block_ordering->group_to_elements();
 
-  // Schur type solvers also require that their residual blocks be
-  // lexicographically ordered.
-  return LexicographicallyOrderResidualBlocks(num_eliminate_blocks,
-                                              program,
-                                              error);
+  for (map<int, set<double*> >::const_iterator group_it = groups.begin();
+       group_it != groups.end();
+       ++group_it) {
+    const set<double*>& group = group_it->second;
+    for (set<double*>::const_iterator parameter_block_ptr_it = group.begin();
+         parameter_block_ptr_it != group.end();
+         ++parameter_block_ptr_it) {
+      ProblemImpl::ParameterMap::const_iterator parameter_block_it =
+          parameter_map.find(*parameter_block_ptr_it);
+      if (parameter_block_it == parameter_map.end()) {
+        *error = StringPrintf("User specified ordering contains a pointer "
+                              "to a double that is not a parameter block in "
+                              "the problem. The invalid double is in group: %d",
+                              group_it->first);
+        return false;
+      }
+      parameter_blocks->push_back(parameter_block_it->second);
+    }
+  }
+  return true;
 }
 
+
 TripletSparseMatrix* SolverImpl::CreateJacobianBlockSparsityTranspose(
     const Program* program) {
 
@@ -1468,34 +1435,185 @@
   return tsm;
 }
 
-void SolverImpl::ReorderProgramForSparseNormalCholesky(Program* program) {
-#ifndef CERES_NO_SUITESPARSE
-  // Set the offsets and index for CreateJacobianSparsityTranspose.
-  program->SetParameterOffsetsAndIndex();
+bool SolverImpl::ReorderProgramForSchurTypeLinearSolver(
+    const LinearSolverType linear_solver_type,
+    const SparseLinearAlgebraLibraryType sparse_linear_algebra_library_type,
+    const ProblemImpl::ParameterMap& parameter_map,
+    ParameterBlockOrdering* parameter_block_ordering,
+    Program* program,
+    string* error) {
+  if (parameter_block_ordering->NumGroups() == 1) {
+    // If the user supplied a parameter_block_ordering with just one
+    // group, it is equivalent to the user supplying NULL as a
+    // parameter_block_ordering. Ceres is completely free to choose the
+    // parameter block ordering as it sees fit. For Schur type solvers,
+    // this means that the user wishes for Ceres to identify the
+    // e_blocks, which we do by computing a maximal independent set.
+    vector<ParameterBlock*> schur_ordering;
+    const int num_eliminate_blocks = ComputeSchurOrdering(*program,
+                                                          &schur_ordering);
 
-  // Compute a block sparse presentation of J'.
-  scoped_ptr<TripletSparseMatrix> tsm_block_jacobian_transpose(
-      SolverImpl::CreateJacobianBlockSparsityTranspose(program));
+    CHECK_EQ(schur_ordering.size(), program->NumParameterBlocks())
+        << "Congratulations, you found a Ceres bug! Please report this error "
+        << "to the developers.";
 
-  // Order rows using AMD.
-  SuiteSparse ss;
-  cholmod_sparse* block_jacobian_transpose =
-      ss.CreateSparseMatrix(tsm_block_jacobian_transpose.get());
+    // Update the parameter_block_ordering object.
+    for (int i = 0; i < schur_ordering.size(); ++i) {
+      double* parameter_block = schur_ordering[i]->mutable_user_state();
+      const int group_id = (i < num_eliminate_blocks) ? 0 : 1;
+      parameter_block_ordering->AddElementToGroup(parameter_block, group_id);
+    }
 
-  vector<int> ordering(program->NumParameterBlocks(), -1);
-  ss.ApproximateMinimumDegreeOrdering(block_jacobian_transpose, &ordering[0]);
-  ss.Free(block_jacobian_transpose);
+    // We could call ApplyUserOrdering but this is cheaper and
+    // simpler.
+    swap(*program->mutable_parameter_blocks(), schur_ordering);
+  } else {
+    // The user provided an ordering with more than one elimination
+    // group. Trust the user and apply the ordering.
+    if (!ApplyUserOrdering(parameter_map,
+                           parameter_block_ordering,
+                           program,
+                           error)) {
+      return false;
+    }
+  }
 
-  // Apply ordering.
-  vector<ParameterBlock*>& parameter_blocks =
-      *(program->mutable_parameter_blocks());
-  const vector<ParameterBlock*> parameter_blocks_copy(parameter_blocks);
-  for (int i = 0; i < program->NumParameterBlocks(); ++i) {
-    parameter_blocks[i] = parameter_blocks_copy[ordering[i]];
+  // Pre-order the columns corresponding to the Schur complement if
+  // possible.
+#if !defined(CERES_NO_SUITESPARSE) && !defined(CERES_NO_CAMD)
+  if (linear_solver_type == SPARSE_SCHUR &&
+      sparse_linear_algebra_library_type == SUITE_SPARSE) {
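+    // Build a per parameter block constraint array from the elimination
+    // groups; the constrained AMD ordering computed below keeps columns
+    // in lower numbered groups (the e_blocks) ahead of the rest.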
+    vector<int> constraints;
+    vector<ParameterBlock*>& parameter_blocks =
+        *(program->mutable_parameter_blocks());
+
+    for (int i = 0; i < parameter_blocks.size(); ++i) {
+      constraints.push_back(
+          parameter_block_ordering->GroupId(
+              parameter_blocks[i]->mutable_user_state()));
+    }
+
+    // Set the offsets and index for CreateJacobianSparsityTranspose.
+    program->SetParameterOffsetsAndIndex();
+    // Compute a block sparse representation of J'.
+    scoped_ptr<TripletSparseMatrix> tsm_block_jacobian_transpose(
+        SolverImpl::CreateJacobianBlockSparsityTranspose(program));
+
+    SuiteSparse ss;
+    cholmod_sparse* block_jacobian_transpose =
+        ss.CreateSparseMatrix(tsm_block_jacobian_transpose.get());
+
+    vector<int> ordering(parameter_blocks.size(), 0);
+    ss.ConstrainedApproximateMinimumDegreeOrdering(block_jacobian_transpose,
+                                                   &constraints[0],
+                                                   &ordering[0]);
+    ss.Free(block_jacobian_transpose);
+
+    const vector<ParameterBlock*> parameter_blocks_copy(parameter_blocks);
+    for (int i = 0; i < program->NumParameterBlocks(); ++i) {
+      parameter_blocks[i] = parameter_blocks_copy[ordering[i]];
+    }
   }
 #endif
 
   program->SetParameterOffsetsAndIndex();
+  // Schur type solvers also require that their residual blocks be
+  // lexicographically ordered.
+  const int num_eliminate_blocks =
+      parameter_block_ordering->group_to_elements().begin()->second.size();
+  return LexicographicallyOrderResidualBlocks(num_eliminate_blocks,
+                                              program,
+                                              error);
+}
+
+bool SolverImpl::ReorderProgramForSparseNormalCholesky(
+    const SparseLinearAlgebraLibraryType sparse_linear_algebra_library_type,
+    const ParameterBlockOrdering* parameter_block_ordering,
+    Program* program,
+    string* error) {
+  // Set the offsets and index for CreateJacobianSparsityTranspose.
+  program->SetParameterOffsetsAndIndex();
+  // Compute a block sparse representation of J'.
+  scoped_ptr<TripletSparseMatrix> tsm_block_jacobian_transpose(
+      SolverImpl::CreateJacobianBlockSparsityTranspose(program));
+
+  vector<int> ordering(program->NumParameterBlocks(), 0);
+  vector<ParameterBlock*>& parameter_blocks =
+      *(program->mutable_parameter_blocks());
+
+  if (sparse_linear_algebra_library_type == SUITE_SPARSE) {
+#ifdef CERES_NO_SUITESPARSE
+    *error = "Can't use SPARSE_NORMAL_CHOLESKY with SUITE_SPARSE because "
+        "SuiteSparse was not enabled when Ceres was built.";
+    return false;
+#else
+    SuiteSparse ss;
+    cholmod_sparse* block_jacobian_transpose =
+        ss.CreateSparseMatrix(tsm_block_jacobian_transpose.get());
+
+#  ifdef CERES_NO_CAMD
+    // No cholmod_camd, so ignore user's parameter_block_ordering and
+    // use plain old AMD.
+    ss.ApproximateMinimumDegreeOrdering(block_jacobian_transpose, &ordering[0]);
+#  else
+    if (parameter_block_ordering->NumGroups() > 1) {
+      // If the user specified more than one elimination group, use them
+      // to constrain the ordering.
+      vector<int> constraints;
+      for (int i = 0; i < parameter_blocks.size(); ++i) {
+        constraints.push_back(
+            parameter_block_ordering->GroupId(
+                parameter_blocks[i]->mutable_user_state()));
+      }
+      ss.ConstrainedApproximateMinimumDegreeOrdering(
+          block_jacobian_transpose,
+          &constraints[0],
+          &ordering[0]);
+    } else {
+      ss.ApproximateMinimumDegreeOrdering(block_jacobian_transpose,
+                                          &ordering[0]);
+    }
+#  endif  // CERES_NO_CAMD
+
+    ss.Free(block_jacobian_transpose);
+#endif  // CERES_NO_SUITESPARSE
+
+  } else if (sparse_linear_algebra_library_type == CX_SPARSE) {
+#ifndef CERES_NO_CXSPARSE
+
+    // CXSparse works with J'J instead of J'. So compute the block
+    // sparsity for J'J and compute an approximate minimum degree
+    // ordering.
+    CXSparse cxsparse;
+    cs_di* block_jacobian_transpose;
+    block_jacobian_transpose =
+        cxsparse.CreateSparseMatrix(tsm_block_jacobian_transpose.get());
+    cs_di* block_jacobian = cxsparse.TransposeMatrix(block_jacobian_transpose);
+    cs_di* block_hessian =
+        cxsparse.MatrixMatrixMultiply(block_jacobian_transpose, block_jacobian);
+    cxsparse.Free(block_jacobian);
+    cxsparse.Free(block_jacobian_transpose);
+
+    cxsparse.ApproximateMinimumDegreeOrdering(block_hessian, &ordering[0]);
+    cxsparse.Free(block_hessian);
+#else  // CERES_NO_CXSPARSE
+    *error = "Can't use SPARSE_NORMAL_CHOLESKY with CX_SPARSE because "
+        "CXSparse was not enabled when Ceres was built.";
+    return false;
+#endif  // CERES_NO_CXSPARSE
+  } else {
+    *error = "Unknown sparse linear algebra library.";
+    return false;
+  }
+
+  // Apply ordering.
+  const vector<ParameterBlock*> parameter_blocks_copy(parameter_blocks);
+  for (int i = 0; i < program->NumParameterBlocks(); ++i) {
+    parameter_blocks[i] = parameter_blocks_copy[ordering[i]];
+  }
+
+  program->SetParameterOffsetsAndIndex();
+  return true;
 }
 
 }  // namespace internal
diff --git a/internal/ceres/solver_impl.h b/internal/ceres/solver_impl.h
index 60fd2a2..ebfb813 100644
--- a/internal/ceres/solver_impl.h
+++ b/internal/ceres/solver_impl.h
@@ -103,15 +103,6 @@
   static LinearSolver* CreateLinearSolver(Solver::Options* options,
                                           string* error);
 
-  // Reorder the parameter blocks in program using the ordering. A
-  // return value of true indicates success and false indicates an
-  // error was encountered whose cause is logged to LOG(ERROR).
-  static bool ApplyUserOrdering(const ProblemImpl::ParameterMap& parameter_map,
-                                const ParameterBlockOrdering* ordering,
-                                Program* program,
-                                string* error);
-
-
   // Reorder the residuals for program, if necessary, so that the
   // residuals involving e block (i.e., the first num_eliminate_block
   // parameter blocks) occur together. This is a necessary condition
@@ -163,29 +154,6 @@
   static void AlternateLinearSolverForSchurTypeLinearSolver(
       Solver::Options* options);
 
-  // Schur type solvers require that all parameter blocks eliminated
-  // by the Schur eliminator occur before others and the residuals be
-  // sorted in lexicographic order of their parameter blocks.
-  //
-  // If ordering has at least two groups, then apply the ordering,
-  // otherwise compute a new ordering using a Maximal Independent Set
-  // algorithm and apply it.
-  //
-  // Upon return, ordering contains the parameter block ordering that
-  // was used to order the program.
-  static bool ReorderProgramForSchurTypeLinearSolver(
-      const ProblemImpl::ParameterMap& parameter_map,
-      ParameterBlockOrdering* ordering,
-      Program* program,
-      string* error);
-
-  // CHOLMOD when doing the sparse cholesky factorization of the
-  // Jacobian matrix, reorders its columns to reduce the
-  // fill-in. Compute this permutation and re-order the parameter
-  // blocks.
-  //
-  static void ReorderProgramForSparseNormalCholesky(Program* program);
-
   // Create a TripletSparseMatrix which contains the zero-one
   // structure corresponding to the block sparsity of the transpose of
   // the Jacobian matrix.
@@ -193,6 +161,53 @@
   // Caller owns the result.
   static TripletSparseMatrix* CreateJacobianBlockSparsityTranspose(
       const Program* program);
+
+  // Reorder the parameter blocks in program using the ordering
+  static bool ApplyUserOrdering(
+      const ProblemImpl::ParameterMap& parameter_map,
+      const ParameterBlockOrdering* parameter_block_ordering,
+      Program* program,
+      string* error);
+
+  // Sparse Cholesky factorization routines, when factorizing the
+  // Jacobian matrix, reorder its columns to reduce the fill-in.
+  // Compute this permutation up front and re-order the parameter
+  // blocks accordingly.
+  //
+  // If the parameter_block_ordering contains more than one
+  // elimination group and support for constrained fill-reducing
+  // ordering is available in the sparse linear algebra library
+  // (SuiteSparse version >= 4.2.0), then the fill-reducing ordering
+  // will take it into account; otherwise it will be ignored.
+  static bool ReorderProgramForSparseNormalCholesky(
+      const SparseLinearAlgebraLibraryType sparse_linear_algebra_library_type,
+      const ParameterBlockOrdering* parameter_block_ordering,
+      Program* program,
+      string* error);
+
+  // Schur type solvers require that all parameter blocks eliminated
+  // by the Schur eliminator occur before others and the residuals be
+  // sorted in lexicographic order of their parameter blocks.
+  //
+  // If the parameter_block_ordering only contains one elimination
+  // group then a maximal independent set is computed and used as the
+  // first elimination group, otherwise the user's ordering is used.
+  //
+  // If the linear solver type is SPARSE_SCHUR and support for
+  // constrained fill-reducing ordering is available in the sparse
+  // linear algebra library (SuiteSparse version >= 4.2.0) then
+  // columns of the Schur complement matrix are ordered to reduce the
+  // fill-in of the Cholesky factorization.
+  //
+  // Upon return, ordering contains the parameter block ordering that
+  // was used to order the program.
+  static bool ReorderProgramForSchurTypeLinearSolver(
+      const LinearSolverType linear_solver_type,
+      const SparseLinearAlgebraLibraryType sparse_linear_algebra_library_type,
+      const ProblemImpl::ParameterMap& parameter_map,
+      ParameterBlockOrdering* parameter_block_ordering,
+      Program* program,
+      string* error);
 };
 
 }  // namespace internal
diff --git a/internal/ceres/solver_impl_test.cc b/internal/ceres/solver_impl_test.cc
index a752eff..e99a3de 100644
--- a/internal/ceres/solver_impl_test.cc
+++ b/internal/ceres/solver_impl_test.cc
@@ -846,5 +846,83 @@
   EXPECT_EQ(options.linear_solver_type, CGNR);
   EXPECT_EQ(options.preconditioner_type, JACOBI);
 }
+
+TEST(SolverImpl, CreateJacobianBlockSparsityTranspose) {
+  ProblemImpl problem;
+  double x[2];
+  double y[3];
+  double z;
+
+  problem.AddParameterBlock(x, 2);
+  problem.AddParameterBlock(y, 3);
+  problem.AddParameterBlock(&z, 1);
+
+  problem.AddResidualBlock(new MockCostFunctionBase<2, 2, 0, 0>(), NULL, x);
+  problem.AddResidualBlock(new MockCostFunctionBase<3, 1, 2, 0>(), NULL, &z, x);
+  problem.AddResidualBlock(new MockCostFunctionBase<4, 1, 3, 0>(), NULL, &z, y);
+  problem.AddResidualBlock(new MockCostFunctionBase<5, 1, 3, 0>(), NULL, &z, y);
+  problem.AddResidualBlock(new MockCostFunctionBase<1, 2, 1, 0>(), NULL, x, &z);
+  problem.AddResidualBlock(new MockCostFunctionBase<2, 1, 3, 0>(), NULL, &z, y);
+  problem.AddResidualBlock(new MockCostFunctionBase<2, 2, 1, 0>(), NULL, x, &z);
+  problem.AddResidualBlock(new MockCostFunctionBase<1, 3, 0, 0>(), NULL, y);
+
+  TripletSparseMatrix expected_block_sparse_jacobian(3, 8, 14);
+  {
+    int* rows = expected_block_sparse_jacobian.mutable_rows();
+    int* cols = expected_block_sparse_jacobian.mutable_cols();
+    double* values = expected_block_sparse_jacobian.mutable_values();
+    rows[0] = 0;
+    cols[0] = 0;
+
+    rows[1] = 2;
+    cols[1] = 1;
+    rows[2] = 0;
+    cols[2] = 1;
+
+    rows[3] = 2;
+    cols[3] = 2;
+    rows[4] = 1;
+    cols[4] = 2;
+
+    rows[5] = 2;
+    cols[5] = 3;
+    rows[6] = 1;
+    cols[6] = 3;
+
+    rows[7] = 0;
+    cols[7] = 4;
+    rows[8] = 2;
+    cols[8] = 4;
+
+    rows[9] = 2;
+    cols[9] = 5;
+    rows[10] = 1;
+    cols[10] = 5;
+
+    rows[11] = 0;
+    cols[11] = 6;
+    rows[12] = 2;
+    cols[12] = 6;
+
+    rows[13] = 1;
+    cols[13] = 7;
+    fill(values, values + 14, 1.0);
+    expected_block_sparse_jacobian.set_num_nonzeros(14);
+  }
+
+  Program* program = problem.mutable_program();
+  program->SetParameterOffsetsAndIndex();
+
+  scoped_ptr<TripletSparseMatrix> actual_block_sparse_jacobian(
+      SolverImpl::CreateJacobianBlockSparsityTranspose(program));
+
+  Matrix expected_dense_jacobian;
+  expected_block_sparse_jacobian.ToDenseMatrix(&expected_dense_jacobian);
+
+  Matrix actual_dense_jacobian;
+  actual_block_sparse_jacobian->ToDenseMatrix(&actual_dense_jacobian);
+  EXPECT_EQ((expected_dense_jacobian - actual_dense_jacobian).norm(), 0.0);
+}
+
 }  // namespace internal
 }  // namespace ceres
diff --git a/internal/ceres/sparse_normal_cholesky_solver.cc b/internal/ceres/sparse_normal_cholesky_solver.cc
index bc1f983..9601142 100644
--- a/internal/ceres/sparse_normal_cholesky_solver.cc
+++ b/internal/ceres/sparse_normal_cholesky_solver.cc
@@ -133,34 +133,37 @@
   // factorized. CHOLMOD/SuiteSparse on the other hand can just work
   // off of Jt to compute the Cholesky factorization of the normal
   // equations.
-  cs_di* A2 = cs_transpose(&At, 1);
-  cs_di* AtA = cs_multiply(&At, A2);
+  cs_di* A2 = cxsparse_.TransposeMatrix(&At);
+  cs_di* AtA = cxsparse_.MatrixMatrixMultiply(&At, A2);
 
   cxsparse_.Free(A2);
   if (per_solve_options.D != NULL) {
     A->DeleteRows(num_cols);
   }
-
   event_logger.AddEvent("Setup");
 
   // Compute symbolic factorization if not available.
   if (cxsparse_factor_ == NULL) {
-    cxsparse_factor_ = CHECK_NOTNULL(cxsparse_.AnalyzeCholesky(AtA));
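+    // With postordering, a fill-reducing ordering is computed using the
+    // column blocks of A; otherwise the pre-ordering computed in
+    // solver_impl.cc is relied upon and the natural ordering is used.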
+    if (options_.use_postordering) {
+      cxsparse_factor_ =
+          CHECK_NOTNULL(cxsparse_.BlockAnalyzeCholesky(AtA,
+                                                       A->col_blocks(),
+                                                       A->col_blocks()));
+    } else {
+      cxsparse_factor_ =
+          CHECK_NOTNULL(cxsparse_.AnalyzeCholeskyWithNaturalOrdering(AtA));
+    }
   }
-
   event_logger.AddEvent("Analysis");
 
-
   // Solve the linear system.
   if (cxsparse_.SolveCholesky(AtA, cxsparse_factor_, Atb.data())) {
     VectorRef(x, Atb.rows()) = Atb;
     summary.termination_type = TOLERANCE;
   }
-
   event_logger.AddEvent("Solve");
 
   cxsparse_.Free(AtA);
-
   event_logger.AddEvent("Teardown");
   return summary;
 }
@@ -205,11 +208,13 @@
 
   if (factor_ == NULL) {
     if (options_.use_postordering) {
-      factor_ = ss_.BlockAnalyzeCholesky(&lhs,
-                                         A->col_blocks(),
-                                         A->row_blocks());
+      factor_ =
+          CHECK_NOTNULL(ss_.BlockAnalyzeCholesky(&lhs,
+                                                 A->col_blocks(),
+                                                 A->row_blocks()));
     } else {
-      factor_ = ss_.AnalyzeCholeskyWithNaturalOrdering(&lhs);
+      factor_ =
+          CHECK_NOTNULL(ss_.AnalyzeCholeskyWithNaturalOrdering(&lhs));
     }
   }
 
diff --git a/internal/ceres/suitesparse.cc b/internal/ceres/suitesparse.cc
index fe2edd3..57d12a1 100644
--- a/internal/ceres/suitesparse.cc
+++ b/internal/ceres/suitesparse.cc
@@ -323,6 +323,21 @@
   cholmod_amd(matrix, NULL, 0, ordering, &cc_);
 }
 
+void SuiteSparse::ConstrainedApproximateMinimumDegreeOrdering(
+    cholmod_sparse* matrix,
+    int* constraints,
+    int* ordering) {
+#ifndef CERES_NO_CAMD
+  cholmod_camd(matrix, NULL, 0, constraints, ordering, &cc_);
+#else
+  LOG(FATAL) << "Congratulations, you have found a bug in Ceres. "
+             << "Ceres Solver was compiled with SuiteSparse "
+             << "version 4.1.0 or less. Calling this function "
+             << "in that case is a bug. Please contact the "
+             << "Ceres Solver developers.";
+#endif
+}
+
 }  // namespace internal
 }  // namespace ceres
 
diff --git a/internal/ceres/suitesparse.h b/internal/ceres/suitesparse.h
index e138623..27182b8 100644
--- a/internal/ceres/suitesparse.h
+++ b/internal/ceres/suitesparse.h
@@ -33,6 +33,7 @@
 #ifndef CERES_INTERNAL_SUITESPARSE_H_
 #define CERES_INTERNAL_SUITESPARSE_H_
 
+
 #ifndef CERES_NO_SUITESPARSE
 
 #include <cstring>
@@ -43,6 +44,20 @@
 #include "cholmod.h"
 #include "glog/logging.h"
 
+// Before SuiteSparse version 4.2.0, cholmod_camd was only enabled
+// if SuiteSparse was compiled with Metis support. This makes
+// calling and linking into cholmod_camd problematic even though it
+// has nothing to do with Metis. This has been fixed reliably in
+// 4.2.0.
+//
+// The fix was actually committed in 4.1.0, but there is
+// some confusion about a silent update to the tar ball, so we are
+// being conservative and choosing the next minor version where
+// things are stable.
+#if (SUITESPARSE_VERSION < 4002)
+#define CERES_NO_CAMD
+#endif
+
 namespace ceres {
 namespace internal {
 
@@ -189,6 +204,38 @@
   // ordering.
   void ApproximateMinimumDegreeOrdering(cholmod_sparse* matrix, int* ordering);
 
+
+  // Before SuiteSparse version 4.2.0, cholmod_camd was only enabled
+  // if SuiteSparse was compiled with Metis support. This makes
+  // calling and linking into cholmod_camd problematic even though it
+  // has nothing to do with Metis. This has been fixed reliably in
+  // 4.2.0.
+  //
+  // The fix was actually committed in 4.1.0, but there is
+  // some confusion about a silent update to the tar ball, so we are
+  // being conservative and choosing the next minor version where
+  // things are stable.
+  static bool IsConstrainedApproximateMinimumDegreeOrderingAvailable() {
+    return (SUITESPARSE_VERSION > 4001);
+  }
+
+  // Find a fill reducing approximate minimum degree
+  // ordering. constraints is an array which associates with each
+  // column of the matrix an elimination group. i.e., all columns in
+  // group 0 are eliminated first, all columns in group 1 are
+  // eliminated next etc. This function finds a fill reducing ordering
+  // that obeys these constraints.
+  //
+  // Calling ApproximateMinimumDegreeOrdering is equivalent to calling
+  // ConstrainedApproximateMinimumDegreeOrdering with a constraint
+  // array that puts all columns in the same elimination group.
+  //
+  // If CERES_NO_CAMD is defined then calling this function will
+  // result in a crash.
+  void ConstrainedApproximateMinimumDegreeOrdering(cholmod_sparse* matrix,
+                                                   int* constraints,
+                                                   int* ordering);
+
   void Free(cholmod_sparse* m) { cholmod_free_sparse(&m, &cc_); }
   void Free(cholmod_dense* m)  { cholmod_free_dense(&m, &cc_);  }
   void Free(cholmod_factor* m) { cholmod_free_factor(&m, &cc_); }
diff --git a/internal/ceres/unsymmetric_linear_solver_test.cc b/internal/ceres/unsymmetric_linear_solver_test.cc
index c8a15c0..34b03be 100644
--- a/internal/ceres/unsymmetric_linear_solver_test.cc
+++ b/internal/ceres/unsymmetric_linear_solver_test.cc
@@ -56,12 +56,7 @@
     sol_regularized_.reset(problem->x_D.release());
   }
 
-  void TestSolver(
-      LinearSolverType linear_solver_type,
-      SparseLinearAlgebraLibraryType sparse_linear_algebra_library) {
-    LinearSolver::Options options;
-    options.type = linear_solver_type;
-    options.sparse_linear_algebra_library = sparse_linear_algebra_library;
+  void TestSolver(const LinearSolver::Options& options) {
     scoped_ptr<LinearSolver> solver(LinearSolver::Create(options));
 
     LinearSolver::PerSolveOptions per_solve_options;
@@ -72,13 +67,22 @@
 
     scoped_ptr<SparseMatrix> transformed_A;
 
-    if (linear_solver_type == DENSE_QR ||
-        linear_solver_type == DENSE_NORMAL_CHOLESKY) {
+    if (options.type == DENSE_QR ||
+        options.type == DENSE_NORMAL_CHOLESKY) {
       transformed_A.reset(new DenseSparseMatrix(*A_));
-    } else if (linear_solver_type == SPARSE_NORMAL_CHOLESKY) {
-      transformed_A.reset(new   CompressedRowSparseMatrix(*A_));
+    } else if (options.type == SPARSE_NORMAL_CHOLESKY) {
+      CompressedRowSparseMatrix* crsm = new CompressedRowSparseMatrix(*A_);
+      // Add row/column blocks structure.
+      for (int i = 0; i < A_->num_rows(); ++i) {
+        crsm->mutable_row_blocks()->push_back(1);
+      }
+
+      for (int i = 0; i < A_->num_cols(); ++i) {
+        crsm->mutable_col_blocks()->push_back(1);
+      }
+      transformed_A.reset(crsm);
     } else {
-      LOG(FATAL) << "Unknown linear solver : " << linear_solver_type;
+      LOG(FATAL) << "Unknown linear solver : " << options.type;
     }
     // Unregularized
     unregularized_solve_summary =
@@ -115,22 +119,50 @@
 };
 
 TEST_F(UnsymmetricLinearSolverTest, DenseQR) {
-  TestSolver(DENSE_QR, SUITE_SPARSE);
+  LinearSolver::Options options;
+  options.type = DENSE_QR;
+  TestSolver(options);
 }
 
 TEST_F(UnsymmetricLinearSolverTest, DenseNormalCholesky) {
-  TestSolver(DENSE_NORMAL_CHOLESKY, SUITE_SPARSE);
+  LinearSolver::Options options;
+  options.type = DENSE_NORMAL_CHOLESKY;
+  TestSolver(options);
 }
 
 #ifndef CERES_NO_SUITESPARSE
-TEST_F(UnsymmetricLinearSolverTest, SparseNormalCholeskyUsingSuiteSparse) {
-  TestSolver(SPARSE_NORMAL_CHOLESKY, SUITE_SPARSE);
+TEST_F(UnsymmetricLinearSolverTest, SparseNormalCholeskyUsingSuiteSparsePreOrdering) {
+  LinearSolver::Options options;
+  options.sparse_linear_algebra_library = SUITE_SPARSE;
+  options.type = SPARSE_NORMAL_CHOLESKY;
+  options.use_postordering = false;
+  TestSolver(options);
+}
+
+TEST_F(UnsymmetricLinearSolverTest, SparseNormalCholeskyUsingSuiteSparsePostOrdering) {
+  LinearSolver::Options options;
+  options.sparse_linear_algebra_library = SUITE_SPARSE;
+  options.type = SPARSE_NORMAL_CHOLESKY;
+  options.use_postordering = true;
+  TestSolver(options);
 }
 #endif
 
 #ifndef CERES_NO_CXSPARSE
-TEST_F(UnsymmetricLinearSolverTest, SparseNormalCholeskyUsingCXSparse) {
-  TestSolver(SPARSE_NORMAL_CHOLESKY, CX_SPARSE);
+TEST_F(UnsymmetricLinearSolverTest, SparseNormalCholeskyUsingCXSparsePreOrdering) {
+  LinearSolver::Options options;
+  options.sparse_linear_algebra_library = CX_SPARSE;
+  options.type = SPARSE_NORMAL_CHOLESKY;
+  options.use_postordering = false;
+  TestSolver(options);
+}
+
+TEST_F(UnsymmetricLinearSolverTest, SparseNormalCholeskyUsingCXSparsePostOrdering) {
+  LinearSolver::Options options;
+  options.sparse_linear_algebra_library = CX_SPARSE;
+  options.type = SPARSE_NORMAL_CHOLESKY;
+  options.use_postordering = true;
+  TestSolver(options);
 }
 #endif