Integrate InnerProductComputer

Despite its relative size, this is a very significant change
to Ceres.

Why
===

Up till now, when the user chose SPARSE_NORMAL_CHOLESKY,
the Jacobian was evaluated in a CompressedRowSparseMatrix,
which was then used to compute the normal equations, which were
passed to a sparse linear algebra library for factorization.

The reason for doing this was that, in the case of SuiteSparse,
we were able to pass the Jacobian matrix directly without
computing the normal equations, and SuiteSparse/CHOLMOD did the
normal equation computation.

This turned out to be slow, so Cheng Wang implemented a high
performance version of the matrix-matrix multiply to compute
the normal equations, and all the sparse linear algebra libraries
now are passed the normal equations.

This raises the question of what the best representation of the
Jacobian is for the normal equation computation.

Turns out BlockSparseMatrix is ideal. It brings two advantages.

1. Jacobian evaluation into a BlockSparseMatrix is considerably
   faster than evaluation into a CompressedRowSparseMatrix. This
   is because we save on a bunch of memory copies.

2. To make the matrix multiplication fast and use the block structure
   Cheng Wang had to essentially make the CompressedRowSparseMatrix
   carry a bunch of sidecar information about the block sparsity,
   essentially making it behave like a BlockSparseMatrix. The resulting
   code had fairly complicated indexing and complicated the semantics
   of CompressedRowSparseMatrix. The new InnerProductComputer class
   does away with all that and once this CL goes in, I will be able to
   remove all that code and simplify the semantics of
   CompressedRowSparseMatrix.

Changes
=======

1. Use InnerProductComputer in SparseNormalCholeskySolver.
2. Change the evaluator instantiated for SPARSE_NORMAL_CHOLESKY with
   static sparsity inside evaluator.cc
3. The former change necessitates that we change ProblemImpl::Evaluate
   to create the evaluator it needs on its own, because it was
   depending on passing "SPARSE_NORMAL_CHOLESKY" as the linear solver
   type to the evaluator factory to get an Evaluator which can use
   CompressedRowSparseMatrix objects for storing the Jacobian.
4. Update the tests for SparseNormalCholeskySolver.
5. Separate out the tests for DynamicSparseNormalCholeskySolver into
   their own file.

Change-Id: I2ef7ef8fbfbb4967d0c1ec2068c1c778248fdf5b
diff --git a/internal/ceres/CMakeLists.txt b/internal/ceres/CMakeLists.txt
index 052e7a0..357fae7 100644
--- a/internal/ceres/CMakeLists.txt
+++ b/internal/ceres/CMakeLists.txt
@@ -307,6 +307,7 @@
   ceres_test(dynamic_autodiff_cost_function)
   ceres_test(dynamic_compressed_row_sparse_matrix)
   ceres_test(dynamic_numeric_diff_cost_function)
+  ceres_test(dynamic_sparse_normal_cholesky_solver)
   ceres_test(dynamic_sparsity)
   ceres_test(evaluator)
   ceres_test(gradient_checker)
diff --git a/internal/ceres/dynamic_sparse_normal_cholesky_solver_test.cc b/internal/ceres/dynamic_sparse_normal_cholesky_solver_test.cc
new file mode 100644
index 0000000..3fdf6a1
--- /dev/null
+++ b/internal/ceres/dynamic_sparse_normal_cholesky_solver_test.cc
@@ -0,0 +1,128 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2017 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+
+#include "ceres/casts.h"
+#include "ceres/compressed_row_sparse_matrix.h"
+#include "ceres/internal/scoped_ptr.h"
+#include "ceres/linear_least_squares_problems.h"
+#include "ceres/linear_solver.h"
+#include "ceres/triplet_sparse_matrix.h"
+#include "ceres/types.h"
+#include "glog/logging.h"
+#include "gtest/gtest.h"
+
+#include "Eigen/Cholesky"
+
+namespace ceres {
+namespace internal {
+
+// TODO(sameeragarwal): These tests needs to be re-written to be more
+// thorough, they do not really test the dynamic nature of the
+// sparsity.
+class DynamicSparseNormalCholeskySolverTest : public ::testing::Test {
+ protected:
+  virtual void SetUp() {
+    scoped_ptr<LinearLeastSquaresProblem> problem(
+        CreateLinearLeastSquaresProblemFromId(1));
+    A_.reset(CompressedRowSparseMatrix::FromTripletSparseMatrix(
+        *down_cast<TripletSparseMatrix*>(problem->A.get())));
+    b_.reset(problem->b.release());
+    D_.reset(problem->D.release());
+  }
+
+  void TestSolver(const LinearSolver::Options& options, double* D) {
+    Matrix dense_A;
+    A_->ToDenseMatrix(&dense_A);
+    Matrix lhs = dense_A.transpose() * dense_A;
+    if (D != NULL) {
+      lhs += (ConstVectorRef(D, A_->num_cols()).array() *
+              ConstVectorRef(D, A_->num_cols()).array())
+                 .matrix()
+                 .asDiagonal();
+    }
+
+    Vector rhs(A_->num_cols());
+    rhs.setZero();
+    A_->LeftMultiply(b_.get(), rhs.data());
+    Vector expected_solution = lhs.llt().solve(rhs);
+
+    scoped_ptr<LinearSolver> solver(LinearSolver::Create(options));
+    LinearSolver::PerSolveOptions per_solve_options;
+    per_solve_options.D = D;
+    Vector actual_solution(A_->num_cols());
+    LinearSolver::Summary summary;
+    summary = solver->Solve(
+        A_.get(), b_.get(), per_solve_options, actual_solution.data());
+
+    EXPECT_EQ(summary.termination_type, LINEAR_SOLVER_SUCCESS);
+
+    for (int i = 0; i < A_->num_cols(); ++i) {
+      EXPECT_NEAR(expected_solution(i), actual_solution(i), 1e-8)
+          << "\nExpected: " << expected_solution.transpose()
+          << "\nActual: " << actual_solution.transpose();
+    }
+  }
+
+  void TestSolver(
+      const SparseLinearAlgebraLibraryType sparse_linear_algebra_library_type) {
+    LinearSolver::Options options;
+    options.type = SPARSE_NORMAL_CHOLESKY;
+    options.dynamic_sparsity = true;
+    options.sparse_linear_algebra_library_type =
+        sparse_linear_algebra_library_type;
+    TestSolver(options, NULL);
+    TestSolver(options, D_.get());
+  }
+
+  scoped_ptr<CompressedRowSparseMatrix> A_;
+  scoped_array<double> b_;
+  scoped_array<double> D_;
+};
+
+#ifndef CERES_NO_SUITESPARSE
+TEST_F(DynamicSparseNormalCholeskySolverTest, SuiteSparse) {
+  TestSolver(SUITE_SPARSE);
+}
+#endif
+
+#ifndef CERES_NO_CXSPARSE
+TEST_F(DynamicSparseNormalCholeskySolverTest, CXSparse) {
+  TestSolver(CX_SPARSE);
+}
+#endif
+
+#ifdef CERES_USE_EIGEN_SPARSE
+TEST_F(DynamicSparseNormalCholeskySolverTest, Eigen) {
+  TestSolver(EIGEN_SPARSE);
+}
+#endif  // CERES_USE_EIGEN_SPARSE
+
+}  // namespace internal
+}  // namespace ceres
diff --git a/internal/ceres/evaluator.cc b/internal/ceres/evaluator.cc
index baba9af..6193ae8 100644
--- a/internal/ceres/evaluator.cc
+++ b/internal/ceres/evaluator.cc
@@ -71,9 +71,9 @@
                                     DynamicCompressedRowJacobianFinalizer>(
                                         options, program);
       } else {
-        return new ProgramEvaluator<ScratchEvaluatePreparer,
-                                    CompressedRowJacobianWriter>(options,
-                                                                 program);
+        return new ProgramEvaluator<BlockEvaluatePreparer,
+                                    BlockJacobianWriter>(options,
+                                                         program);
       }
 
     default:
diff --git a/internal/ceres/problem_impl.cc b/internal/ceres/problem_impl.cc
index 4abea8b..9861958 100644
--- a/internal/ceres/problem_impl.cc
+++ b/internal/ceres/problem_impl.cc
@@ -39,15 +39,19 @@
 #include <utility>
 #include <vector>
 #include "ceres/casts.h"
+#include "ceres/compressed_row_jacobian_writer.h"
 #include "ceres/compressed_row_sparse_matrix.h"
 #include "ceres/cost_function.h"
 #include "ceres/crs_matrix.h"
 #include "ceres/evaluator.h"
+#include "ceres/internal/port.h"
 #include "ceres/loss_function.h"
 #include "ceres/map_util.h"
 #include "ceres/parameter_block.h"
 #include "ceres/program.h"
+#include "ceres/program_evaluator.h"
 #include "ceres/residual_block.h"
+#include "ceres/scratch_evaluate_preparer.h"
 #include "ceres/stl_util.h"
 #include "ceres/stringprintf.h"
 #include "glog/logging.h"
@@ -752,24 +756,10 @@
   evaluator_options.num_threads = evaluate_options.num_threads;
 #endif  // CERES_USE_OPENMP
 
-  string error;
   scoped_ptr<Evaluator> evaluator(
-      Evaluator::Create(evaluator_options, &program, &error));
-  if (evaluator.get() == NULL) {
-    LOG(ERROR) << "Unable to create an Evaluator object. "
-               << "Error: " << error
-               << "This is a Ceres bug; please contact the developers!";
-
-    // Make the parameter blocks that were temporarily marked
-    // constant, variable again.
-    for (int i = 0; i < variable_parameter_blocks.size(); ++i) {
-      variable_parameter_blocks[i]->SetVarying();
-    }
-
-    program_->SetParameterBlockStatePtrsToUserStatePtrs();
-    program_->SetParameterOffsetsAndIndex();
-    return false;
-  }
+      new ProgramEvaluator<ScratchEvaluatePreparer,
+                           CompressedRowJacobianWriter>(evaluator_options,
+                                                        &program));
 
   if (residuals !=NULL) {
     residuals->resize(evaluator->NumResiduals());
diff --git a/internal/ceres/sparse_cholesky_test.cc b/internal/ceres/sparse_cholesky_test.cc
index e18f20a..c94beea 100644
--- a/internal/ceres/sparse_cholesky_test.cc
+++ b/internal/ceres/sparse_cholesky_test.cc
@@ -35,7 +35,9 @@
 
 #include "Eigen/Dense"
 #include "Eigen/SparseCore"
+#include "ceres/block_sparse_matrix.h"
 #include "ceres/compressed_row_sparse_matrix.h"
+#include "ceres/inner_product_computer.h"
 #include "ceres/internal/eigen.h"
 #include "ceres/internal/scoped_ptr.h"
 #include "ceres/random.h"
@@ -45,14 +47,12 @@
 namespace ceres {
 namespace internal {
 
-CompressedRowSparseMatrix* CreateRandomSymmetricPositiveDefiniteMatrix(
-    const int num_col_blocks,
-    const int min_col_block_size,
-    const int max_col_block_size,
-    const double block_density,
-    const CompressedRowSparseMatrix::StorageType storage_type) {
+BlockSparseMatrix* CreateRandomFullRankMatrix(const int num_col_blocks,
+                                              const int min_col_block_size,
+                                              const int max_col_block_size,
+                                              const double block_density) {
   // Create a random matrix
-  CompressedRowSparseMatrix::RandomMatrixOptions options;
+  BlockSparseMatrix::RandomMatrixOptions options;
   options.num_col_blocks = num_col_blocks;
   options.min_col_block_size = min_col_block_size;
   options.max_col_block_size = max_col_block_size;
@@ -61,23 +61,16 @@
   options.min_row_block_size = 1;
   options.max_row_block_size = max_col_block_size;
   options.block_density = block_density;
-  scoped_ptr<CompressedRowSparseMatrix> random_crsm(
-      CompressedRowSparseMatrix::CreateRandomMatrix(options));
+  scoped_ptr<BlockSparseMatrix> random_matrix(
+      BlockSparseMatrix::CreateRandomMatrix(options));
 
   // Add a diagonal block sparse matrix to make it full rank.
-  Vector diagonal = Vector::Ones(random_crsm->num_cols());
-  scoped_ptr<CompressedRowSparseMatrix> block_diagonal(
-      CompressedRowSparseMatrix::CreateBlockDiagonalMatrix(
-          diagonal.data(), random_crsm->col_blocks()));
-  random_crsm->AppendRows(*block_diagonal);
-
-  // Compute output = random_crsm' * random_crsm
-  std::vector<int> program;
-  CompressedRowSparseMatrix* output =
-      CompressedRowSparseMatrix::CreateOuterProductMatrixAndProgram(
-          *random_crsm, storage_type, &program);
-  CompressedRowSparseMatrix::ComputeOuterProduct(*random_crsm, program, output);
-  return output;
+  Vector diagonal = Vector::Ones(random_matrix->num_cols());
+  scoped_ptr<BlockSparseMatrix> block_diagonal(
+      BlockSparseMatrix::CreateDiagonalMatrix(
+          diagonal.data(), random_matrix->block_structure()->cols));
+  random_matrix->AppendRows(*block_diagonal);
+  return random_matrix.release();
 }
 
 bool ComputeExpectedSolution(const CompressedRowSparseMatrix& lhs,
@@ -119,12 +112,13 @@
   const CompressedRowSparseMatrix::StorageType storage_type =
       sparse_cholesky->StorageType();
 
-  scoped_ptr<CompressedRowSparseMatrix> lhs(
-      CreateRandomSymmetricPositiveDefiniteMatrix(num_blocks,
-                                                  min_block_size,
-                                                  max_block_size,
-                                                  block_density,
-                                                  storage_type));
+  scoped_ptr<BlockSparseMatrix> m(CreateRandomFullRankMatrix(
+      num_blocks, min_block_size, max_block_size, block_density));
+  scoped_ptr<InnerProductComputer> inner_product_computer(
+      InnerProductComputer::Create(*m, storage_type));
+  inner_product_computer->Compute();
+  CompressedRowSparseMatrix* lhs = inner_product_computer->mutable_result();
+
   if (!use_block_structure) {
     lhs->mutable_row_blocks()->clear();
     lhs->mutable_col_blocks()->clear();
@@ -137,7 +131,7 @@
   EXPECT_TRUE(ComputeExpectedSolution(*lhs, rhs, &expected));
   std::string message;
   EXPECT_EQ(sparse_cholesky->FactorAndSolve(
-                lhs.get(), rhs.data(), actual.data(), &message),
+                lhs, rhs.data(), actual.data(), &message),
             LINEAR_SOLVER_SUCCESS);
   Matrix eigen_lhs;
   lhs->ToDenseMatrix(&eigen_lhs);
diff --git a/internal/ceres/sparse_normal_cholesky_solver.cc b/internal/ceres/sparse_normal_cholesky_solver.cc
index 3253548..37f5a8e 100644
--- a/internal/ceres/sparse_normal_cholesky_solver.cc
+++ b/internal/ceres/sparse_normal_cholesky_solver.cc
@@ -1,5 +1,5 @@
 // Ceres Solver - A fast non-linear least squares minimizer
-// Copyright 2015 Google Inc. All rights reserved.
+// Copyright 2017 Google Inc. All rights reserved.
 // http://ceres-solver.org/
 //
 // Redistribution and use in source and binary forms, with or without
@@ -34,7 +34,8 @@
 #include <cstring>
 #include <ctime>
 
-#include "ceres/compressed_row_sparse_matrix.h"
+#include "ceres/block_sparse_matrix.h"
+#include "ceres/inner_product_computer.h"
 #include "ceres/internal/eigen.h"
 #include "ceres/internal/scoped_ptr.h"
 #include "ceres/linear_solver.h"
@@ -57,7 +58,7 @@
 SparseNormalCholeskySolver::~SparseNormalCholeskySolver() {}
 
 LinearSolver::Summary SparseNormalCholeskySolver::SolveImpl(
-    CompressedRowSparseMatrix* A,
+    BlockSparseMatrix* A,
     const double* b,
     const LinearSolver::PerSolveOptions& per_solve_options,
     double* x) {
@@ -75,35 +76,32 @@
   if (per_solve_options.D != NULL) {
     // Temporarily append a diagonal block to the A matrix, but undo
     // it before returning the matrix to the user.
-    scoped_ptr<CompressedRowSparseMatrix> regularizer;
-    if (A->col_blocks().size() > 0) {
-      regularizer.reset(CompressedRowSparseMatrix::CreateBlockDiagonalMatrix(
-          per_solve_options.D, A->col_blocks()));
-    } else {
-      regularizer.reset(
-          new CompressedRowSparseMatrix(per_solve_options.D, num_cols));
-    }
+    scoped_ptr<BlockSparseMatrix> regularizer;
+    regularizer.reset(BlockSparseMatrix::CreateDiagonalMatrix(
+        per_solve_options.D, A->block_structure()->cols));
+    event_logger.AddEvent("Diagonal");
     A->AppendRows(*regularizer);
+    event_logger.AddEvent("Append");
   }
   event_logger.AddEvent("Append Rows");
 
-  if (outer_product_.get() == NULL) {
-    outer_product_.reset(
-        CompressedRowSparseMatrix::CreateOuterProductMatrixAndProgram(
-            *A, sparse_cholesky_->StorageType(), &pattern_));
-    event_logger.AddEvent("Outer Product Program");
+  if (inner_product_computer_.get() == NULL) {
+    inner_product_computer_.reset(
+        InnerProductComputer::Create(*A, sparse_cholesky_->StorageType()));
+
+    event_logger.AddEvent("InnerProductComputer::Create");
   }
 
-  CompressedRowSparseMatrix::ComputeOuterProduct(
-      *A, pattern_, outer_product_.get());
-  event_logger.AddEvent("Outer Product");
+  inner_product_computer_->Compute();
+  event_logger.AddEvent("InnerProductComputer::Compute");
+
+  // TODO(sameeragarwal):
 
   if (per_solve_options.D != NULL) {
-    A->DeleteRows(num_cols);
+    A->DeleteRowBlocks(A->block_structure()->cols.size());
   }
-
   summary.termination_type = sparse_cholesky_->FactorAndSolve(
-      outer_product_.get(), x, x, &summary.message);
+      inner_product_computer_->mutable_result(), x, x, &summary.message);
   event_logger.AddEvent("Factor & Solve");
   return summary;
 }
diff --git a/internal/ceres/sparse_normal_cholesky_solver.h b/internal/ceres/sparse_normal_cholesky_solver.h
index 597cff7..e24799d 100644
--- a/internal/ceres/sparse_normal_cholesky_solver.h
+++ b/internal/ceres/sparse_normal_cholesky_solver.h
@@ -1,5 +1,5 @@
 // Ceres Solver - A fast non-linear least squares minimizer
-// Copyright 2015 Google Inc. All rights reserved.
+// Copyright 2017 Google Inc. All rights reserved.
 // http://ceres-solver.org/
 //
 // Redistribution and use in source and binary forms, with or without
@@ -45,26 +45,26 @@
 namespace internal {
 
 class CompressedRowSparseMatrix;
+class InnerProductComputer;
 class SparseCholesky;
 
 // Solves the normal equations (A'A + D'D) x = A'b, using the sparse
 // linear algebra library of the user's choice.
-class SparseNormalCholeskySolver : public CompressedRowSparseMatrixSolver {
+class SparseNormalCholeskySolver : public BlockSparseMatrixSolver {
  public:
   explicit SparseNormalCholeskySolver(const LinearSolver::Options& options);
   virtual ~SparseNormalCholeskySolver();
 
  private:
   virtual LinearSolver::Summary SolveImpl(
-      CompressedRowSparseMatrix* A,
+      BlockSparseMatrix* A,
       const double* b,
       const LinearSolver::PerSolveOptions& options,
       double* x);
 
   const LinearSolver::Options options_;
   scoped_ptr<SparseCholesky> sparse_cholesky_;
-  scoped_ptr<CompressedRowSparseMatrix> outer_product_;
-  std::vector<int> pattern_;
+  scoped_ptr<InnerProductComputer> inner_product_computer_;
   CERES_DISALLOW_COPY_AND_ASSIGN(SparseNormalCholeskySolver);
 };
 
diff --git a/internal/ceres/sparse_normal_cholesky_solver_test.cc b/internal/ceres/sparse_normal_cholesky_solver_test.cc
index ea4965e..2546c16 100644
--- a/internal/ceres/sparse_normal_cholesky_solver_test.cc
+++ b/internal/ceres/sparse_normal_cholesky_solver_test.cc
@@ -28,8 +28,8 @@
 //
 // Author: sameeragarwal@google.com (Sameer Agarwal)
 
+#include "ceres/block_sparse_matrix.h"
 #include "ceres/casts.h"
-#include "ceres/compressed_row_sparse_matrix.h"
 #include "ceres/internal/scoped_ptr.h"
 #include "ceres/linear_least_squares_problems.h"
 #include "ceres/linear_solver.h"
@@ -38,12 +38,14 @@
 #include "glog/logging.h"
 #include "gtest/gtest.h"
 
+#include "Eigen/Cholesky"
+
 namespace ceres {
 namespace internal {
 
 // TODO(sameeragarwal): These tests needs to be re-written, since
 // SparseNormalCholeskySolver is a composition of two classes now,
-// OuterProduct and SparseCholesky.
+// InnerProductComputer and SparseCholesky.
 //
 // So the test should exercise the composition, rather than the
 // numerics of the solver, which are well covered by tests for those
@@ -52,88 +54,55 @@
  protected:
   virtual void SetUp() {
     scoped_ptr<LinearLeastSquaresProblem> problem(
-        CreateLinearLeastSquaresProblemFromId(0));
+        CreateLinearLeastSquaresProblemFromId(2));
 
     CHECK_NOTNULL(problem.get());
-    A_.reset(down_cast<TripletSparseMatrix*>(problem->A.release()));
+    A_.reset(down_cast<BlockSparseMatrix*>(problem->A.release()));
     b_.reset(problem->b.release());
     D_.reset(problem->D.release());
-    sol_unregularized_.reset(problem->x.release());
-    sol_regularized_.reset(problem->x_D.release());
+  }
+
+  void TestSolver(const LinearSolver::Options& options, double* D) {
+    Matrix dense_A;
+    A_->ToDenseMatrix(&dense_A);
+    Matrix lhs = dense_A.transpose() * dense_A;
+    if (D != NULL) {
+      lhs += (ConstVectorRef(D, A_->num_cols()).array() *
+              ConstVectorRef(D, A_->num_cols()).array())
+                 .matrix()
+                 .asDiagonal();
+    }
+
+    Vector rhs(A_->num_cols());
+    rhs.setZero();
+    A_->LeftMultiply(b_.get(), rhs.data());
+    Vector expected_solution = lhs.llt().solve(rhs);
+
+    scoped_ptr<LinearSolver> solver(LinearSolver::Create(options));
+    LinearSolver::PerSolveOptions per_solve_options;
+    per_solve_options.D = D;
+    Vector actual_solution(A_->num_cols());
+    LinearSolver::Summary summary;
+    summary = solver->Solve(
+        A_.get(), b_.get(), per_solve_options, actual_solution.data());
+
+    EXPECT_EQ(summary.termination_type, LINEAR_SOLVER_SUCCESS);
+
+    for (int i = 0; i < A_->num_cols(); ++i) {
+      EXPECT_NEAR(expected_solution(i), actual_solution(i), 1e-8)
+          << "\nExpected: " << expected_solution.transpose()
+          << "\nActual: " << actual_solution.transpose();
+    }
   }
 
   void TestSolver(const LinearSolver::Options& options) {
-    LinearSolver::PerSolveOptions per_solve_options;
-    LinearSolver::Summary unregularized_solve_summary;
-    LinearSolver::Summary regularized_solve_summary;
-    Vector x_unregularized(A_->num_cols());
-    Vector x_regularized(A_->num_cols());
-
-    scoped_ptr<SparseMatrix> transformed_A;
-
-    CompressedRowSparseMatrix* crsm =
-        CompressedRowSparseMatrix::FromTripletSparseMatrix(*A_);
-    // Add row/column blocks structure.
-    for (int i = 0; i < A_->num_rows(); ++i) {
-      crsm->mutable_row_blocks()->push_back(1);
-    }
-
-    for (int i = 0; i < A_->num_cols(); ++i) {
-      crsm->mutable_col_blocks()->push_back(1);
-    }
-
-    // With all blocks of size 1, crsb_rows and crsb_cols are equivalent to
-    // rows and cols.
-    std::copy(crsm->rows(),
-              crsm->rows() + crsm->num_rows() + 1,
-              std::back_inserter(*crsm->mutable_crsb_rows()));
-
-    std::copy(crsm->cols(),
-              crsm->cols() + crsm->num_nonzeros(),
-              std::back_inserter(*crsm->mutable_crsb_cols()));
-
-    transformed_A.reset(crsm);
-
-    // Unregularized
-    scoped_ptr<LinearSolver> solver(LinearSolver::Create(options));
-    unregularized_solve_summary = solver->Solve(transformed_A.get(),
-                                                b_.get(),
-                                                per_solve_options,
-                                                x_unregularized.data());
-
-    // Sparsity structure is changing, reset the solver.
-    solver.reset(LinearSolver::Create(options));
-    // Regularized solution
-    per_solve_options.D = D_.get();
-    regularized_solve_summary = solver->Solve(
-        transformed_A.get(), b_.get(), per_solve_options, x_regularized.data());
-
-    EXPECT_EQ(unregularized_solve_summary.termination_type,
-              LINEAR_SOLVER_SUCCESS);
-
-    for (int i = 0; i < A_->num_cols(); ++i) {
-      EXPECT_NEAR(sol_unregularized_[i], x_unregularized[i], 1e-8)
-          << "\nExpected: "
-          << ConstVectorRef(sol_unregularized_.get(), A_->num_cols())
-                 .transpose()
-          << "\nActual: " << x_unregularized.transpose();
-    }
-
-    EXPECT_EQ(regularized_solve_summary.termination_type,
-              LINEAR_SOLVER_SUCCESS);
-    for (int i = 0; i < A_->num_cols(); ++i) {
-      EXPECT_NEAR(sol_regularized_[i], x_regularized[i], 1e-8)
-          << "\nExpected: "
-          << ConstVectorRef(sol_regularized_.get(), A_->num_cols()).transpose()
-          << "\nActual: " << x_regularized.transpose();
-    }
+    TestSolver(options, NULL);
+    TestSolver(options, D_.get());
   }
 
-  scoped_ptr<TripletSparseMatrix> A_;
+  scoped_ptr<BlockSparseMatrix> A_;
   scoped_array<double> b_;
   scoped_array<double> D_;
-  scoped_array<double> sol_unregularized_;
-  scoped_array<double> sol_regularized_;
 };
 
 #ifndef CERES_NO_SUITESPARSE
@@ -154,15 +123,6 @@
   options.use_postordering = true;
   TestSolver(options);
 }
-
-TEST_F(SparseNormalCholeskyLinearSolverTest,
-       SparseNormalCholeskyUsingSuiteSparseDynamicSparsity) {
-  LinearSolver::Options options;
-  options.sparse_linear_algebra_library_type = SUITE_SPARSE;
-  options.type = SPARSE_NORMAL_CHOLESKY;
-  options.dynamic_sparsity = true;
-  TestSolver(options);
-}
 #endif
 
 #ifndef CERES_NO_CXSPARSE
@@ -183,15 +143,6 @@
   options.use_postordering = true;
   TestSolver(options);
 }
-
-TEST_F(SparseNormalCholeskyLinearSolverTest,
-       SparseNormalCholeskyUsingCXSparseDynamicSparsity) {
-  LinearSolver::Options options;
-  options.sparse_linear_algebra_library_type = CX_SPARSE;
-  options.type = SPARSE_NORMAL_CHOLESKY;
-  options.dynamic_sparsity = true;
-  TestSolver(options);
-}
 #endif
 
 #ifdef CERES_USE_EIGEN_SPARSE
@@ -212,15 +163,6 @@
   options.use_postordering = true;
   TestSolver(options);
 }
-
-TEST_F(SparseNormalCholeskyLinearSolverTest,
-       SparseNormalCholeskyUsingEigenDynamicSparsity) {
-  LinearSolver::Options options;
-  options.sparse_linear_algebra_library_type = EIGEN_SPARSE;
-  options.type = SPARSE_NORMAL_CHOLESKY;
-  options.dynamic_sparsity = true;
-  TestSolver(options);
-}
 #endif  // CERES_USE_EIGEN_SPARSE
 
 }  // namespace internal