Delete obsolete outer product code

Remove the outer product computation code from CompressedRowSparseMatrix.
In the process, also remove the crsb_rows and crsb_cols vectors from the
matrix; they were added to carry the block sparsity structure of the
matrix so that the outer product could be computed quickly.

InnerProductComputer, which operates directly on BlockSparseMatrix, has
rendered all of this code moot.
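
For reference, a purely illustrative sketch of the encoding being
removed (the block structure below is hypothetical, not taken from any
test): for a matrix with two row blocks, where row block 0 contains
column blocks {0, 2} and row block 1 contains column block {1}, the
vectors held

    crsb_rows = {0, 2, 3};  // prefix sum of column blocks per row block
    crsb_cols = {0, 2, 1};  // column block ids, row block by row block

i.e. the usual CRS rows()/cols() layout, but in block rather than
scalar units. Since InnerProductComputer works off the
BlockSparseMatrix, this duplicate copy of the block structure is no
longer needed.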

Change-Id: If3ee0dc8ad4ff79594fd1eebc15a647c4495d726
diff --git a/internal/ceres/compressed_row_jacobian_writer.cc b/internal/ceres/compressed_row_jacobian_writer.cc
index d21aa7d..0444d45 100644
--- a/internal/ceres/compressed_row_jacobian_writer.cc
+++ b/internal/ceres/compressed_row_jacobian_writer.cc
@@ -123,12 +123,6 @@
   int* rows = jacobian->mutable_rows();
   int* cols = jacobian->mutable_cols();
 
-  // Initialize crsb rows and cols.
-  std::vector<int>& crsb_rows = *jacobian->mutable_crsb_rows();
-  std::vector<int>& crsb_cols = *jacobian->mutable_crsb_cols();
-  crsb_rows.resize(residual_blocks.size() + 1);
-  crsb_rows[0] = 0;
-
   int row_pos = 0;
   rows[0] = 0;
   for (int i = 0; i < residual_blocks.size(); ++i) {
@@ -165,11 +159,6 @@
                  << "Parameter Blocks: " << parameter_block_description;
     }
 
-    // Populate crsb rows and cols.
-    crsb_rows[i + 1] = crsb_rows[i] + parameter_indices.size();
-    std::copy(parameter_indices.begin(), parameter_indices.end(),
-              std::back_inserter(crsb_cols));
-
     // Update the row indices.
     const int num_residuals = residual_block->NumResiduals();
     for (int j = 0; j < num_residuals; ++j) {
diff --git a/internal/ceres/compressed_row_sparse_matrix.cc b/internal/ceres/compressed_row_sparse_matrix.cc
index 3e75f19..ee59eae 100644
--- a/internal/ceres/compressed_row_sparse_matrix.cc
+++ b/internal/ceres/compressed_row_sparse_matrix.cc
@@ -311,10 +311,6 @@
     return;
   }
 
-  // Sanity check for compressed row sparse block information
-  CHECK_EQ(crsb_rows_.size(), row_blocks_.size() + 1);
-  CHECK_EQ(crsb_rows_.back(), crsb_cols_.size());
-
   // Walk the list of row blocks until we reach the new number of rows
   // and the drop the rest of the row blocks.
   int num_row_blocks = 0;
@@ -325,11 +321,6 @@
   }
 
   row_blocks_.resize(num_row_blocks);
-
-  // Update compressed row sparse block (crsb) information.
-  CHECK_EQ(num_rows, num_rows_);
-  crsb_rows_.resize(num_row_blocks + 1);
-  crsb_cols_.resize(crsb_rows_[num_row_blocks]);
 }
 
 void CompressedRowSparseMatrix::AppendRows(const CompressedRowSparseMatrix& m) {
@@ -377,32 +368,8 @@
     return;
   }
 
-  // Sanity check for compressed row sparse block information
-  CHECK_EQ(crsb_rows_.size(), row_blocks_.size() + 1);
-  CHECK_EQ(crsb_rows_.back(), crsb_cols_.size());
-
   row_blocks_.insert(
       row_blocks_.end(), m.row_blocks().begin(), m.row_blocks().end());
-
-  // The rest of the code updates the compressed row sparse block
-  // (crsb) information.
-  const int num_crsb_nonzeros = crsb_cols_.size();
-  const int m_num_crsb_nonzeros = m.crsb_cols_.size();
-  crsb_cols_.resize(num_crsb_nonzeros + m_num_crsb_nonzeros);
-  std::copy(&m.crsb_cols()[0],
-            &m.crsb_cols()[0] + m_num_crsb_nonzeros,
-            &crsb_cols_[num_crsb_nonzeros]);
-
-  const int num_crsb_rows = crsb_rows_.size() - 1;
-  const int m_num_crsb_rows = m.crsb_rows_.size() - 1;
-  crsb_rows_.resize(num_crsb_rows + m_num_crsb_rows + 1);
-  std::fill(crsb_rows_.begin() + num_crsb_rows,
-            crsb_rows_.begin() + num_crsb_rows + m_num_crsb_rows + 1,
-            crsb_rows_[num_crsb_rows]);
-
-  for (int r = 0; r < m_num_crsb_rows + 1; ++r) {
-    crsb_rows_[num_crsb_rows + r] += m.crsb_rows()[r];
-  }
 }
 
 void CompressedRowSparseMatrix::ToTextFile(FILE* file) const {
@@ -469,15 +436,6 @@
   *matrix->mutable_row_blocks() = blocks;
   *matrix->mutable_col_blocks() = blocks;
 
-  // Fill compressed row sparse block (crsb) information.
-  vector<int>& crsb_rows = *matrix->mutable_crsb_rows();
-  vector<int>& crsb_cols = *matrix->mutable_crsb_cols();
-  for (int i = 0; i < blocks.size(); ++i) {
-    crsb_rows.push_back(i);
-    crsb_cols.push_back(i);
-  }
-  crsb_rows.push_back(blocks.size());
-
   CHECK_EQ(idx_cursor, num_nonzeros);
   CHECK_EQ(col_cursor, num_rows);
   return matrix;
@@ -517,381 +475,11 @@
     return transpose;
   }
 
-  // Sanity check for compressed row sparse block information
-  CHECK_EQ(crsb_rows_.size(), row_blocks_.size() + 1);
-  CHECK_EQ(crsb_rows_.back(), crsb_cols_.size());
-
   *(transpose->mutable_row_blocks()) = col_blocks_;
   *(transpose->mutable_col_blocks()) = row_blocks_;
-
-  // The rest of the code updates the compressed row sparse block
-  // (crsb) information.
-  vector<int>& transpose_crsb_rows = *transpose->mutable_crsb_rows();
-  vector<int>& transpose_crsb_cols = *transpose->mutable_crsb_cols();
-
-  transpose_crsb_rows.resize(col_blocks_.size() + 1);
-  transpose_crsb_cols.resize(crsb_cols_.size());
-  TransposeForCompressedRowSparseStructure(row_blocks().size(),
-                                           col_blocks().size(),
-                                           crsb_cols().size(),
-                                           crsb_rows().data(),
-                                           crsb_cols().data(),
-                                           NULL,
-                                           transpose_crsb_rows.data(),
-                                           transpose_crsb_cols.data(),
-                                           NULL);
-
   return transpose;
 }
 
-namespace {
-// A ProductTerm is a term in the block outer product of a matrix with
-// itself.
-struct ProductTerm {
-  ProductTerm(const int row, const int col, const int index)
-      : row(row), col(col), index(index) {}
-
-  bool operator<(const ProductTerm& right) const {
-    if (row == right.row) {
-      if (col == right.col) {
-        return index < right.index;
-      }
-      return col < right.col;
-    }
-    return row < right.row;
-  }
-
-  int row;
-  int col;
-  int index;
-};
-
-// Create outer product matrix based on the block product information.
-// The input block product is already sorted. This function does not
-// set the sparse rows/cols information. Instead, it only collects the
-// nonzeros for each compressed row and puts in row_nnz. The caller of
-// this function will traverse the block product in a second round to
-// generate the sparse rows/cols information. This function also
-// computes the block offset information for the outer product matrix,
-// which is used in outer product computation.
-CompressedRowSparseMatrix* CreateOuterProductMatrix(
-    const int num_cols,
-    const CompressedRowSparseMatrix::StorageType storage_type,
-    const vector<int>& blocks,
-    const vector<ProductTerm>& product,
-    vector<int>* row_nnz) {
-  // Count the number of unique product term, which in turn is the
-  // number of non-zeros in the outer product. Also count the number
-  // of non-zeros in each row.
-  row_nnz->resize(blocks.size());
-  std::fill(row_nnz->begin(), row_nnz->end(), 0);
-  (*row_nnz)[product[0].row] = blocks[product[0].col];
-  int num_nonzeros = blocks[product[0].row] * blocks[product[0].col];
-  for (int i = 1; i < product.size(); ++i) {
-    // Each (row, col) block counts only once.
-    // This check depends on product sorted on (row, col).
-    if (product[i].row != product[i - 1].row ||
-        product[i].col != product[i - 1].col) {
-      (*row_nnz)[product[i].row] += blocks[product[i].col];
-      num_nonzeros += blocks[product[i].row] * blocks[product[i].col];
-    }
-  }
-
-  CompressedRowSparseMatrix* matrix =
-      new CompressedRowSparseMatrix(num_cols, num_cols, num_nonzeros);
-  matrix->set_storage_type(storage_type);
-
-  *(matrix->mutable_row_blocks()) = blocks;
-  *(matrix->mutable_col_blocks()) = blocks;
-
-  // Compute block offsets for outer product matrix, which is used in
-  // ComputeOuterProduct.
-  vector<int>* block_offsets = matrix->mutable_block_offsets();
-  block_offsets->resize(blocks.size() + 1);
-  (*block_offsets)[0] = 0;
-  for (int i = 0; i < blocks.size(); ++i) {
-    (*block_offsets)[i + 1] = (*block_offsets)[i] + blocks[i];
-  }
-
-  return matrix;
-}
-
-CompressedRowSparseMatrix* CompressAndFillProgram(
-    const int num_cols,
-    const CompressedRowSparseMatrix::StorageType storage_type,
-    const vector<int>& blocks,
-    const vector<ProductTerm>& product,
-    vector<int>* program) {
-  CHECK_GT(product.size(), 0);
-
-  vector<int> row_nnz;
-  CompressedRowSparseMatrix* matrix = CreateOuterProductMatrix(
-      num_cols, storage_type, blocks, product, &row_nnz);
-
-  const vector<int>& block_offsets = matrix->block_offsets();
-
-  int* crsm_rows = matrix->mutable_rows();
-  std::fill(crsm_rows, crsm_rows + num_cols + 1, 0);
-  int* crsm_cols = matrix->mutable_cols();
-  std::fill(crsm_cols, crsm_cols + matrix->num_nonzeros(), 0);
-
-  CHECK_NOTNULL(program)->clear();
-  program->resize(product.size());
-
-  // Non zero elements are not stored consecutively across rows in a block.
-  // We seperate nonzero into three categories:
-  //   nonzeros in all previous row blocks counted in nnz
-  //   nonzeros in current row counted in row_nnz
-  //   nonzeros in previous col blocks of current row counted in col_nnz
-  //
-  // Give an element (j, k) within a block such that j and k
-  // represent the relative position to the starting row and starting col of
-  // the block, the row and col for the element is
-  //   block_offsets[current.row] + j
-  //   block_offsets[current.col] + k
-  // The total number of nonzero to the element is
-  //   nnz + row_nnz[current.row] * j + col_nnz + k
-  //
-  // program keeps col_nnz for block product, which is used later for
-  // outer product computation.
-  //
-  // There is no special handling for diagonal blocks as we generate
-  // BLOCK triangular matrix (diagonal block is full block) instead of
-  // standard triangular matrix.
-  int nnz = 0;
-  int col_nnz = 0;
-
-  // Process first product term.
-  for (int j = 0; j < blocks[product[0].row]; ++j) {
-    crsm_rows[block_offsets[product[0].row] + j + 1] = row_nnz[product[0].row];
-    for (int k = 0; k < blocks[product[0].col]; ++k) {
-      crsm_cols[row_nnz[product[0].row] * j + k] =
-          block_offsets[product[0].col] + k;
-    }
-  }
-
-  (*program)[product[0].index] = 0;
-
-  // Process rest product terms.
-  for (int i = 1; i < product.size(); ++i) {
-    const ProductTerm& previous = product[i - 1];
-    const ProductTerm& current = product[i];
-
-    // Sparsity structure is updated only if the term is not a repeat.
-    if (previous.row != current.row || previous.col != current.col) {
-      col_nnz += blocks[previous.col];
-      if (previous.row != current.row) {
-        nnz += col_nnz * blocks[previous.row];
-        col_nnz = 0;
-
-        for (int j = 0; j < blocks[current.row]; ++j) {
-          crsm_rows[block_offsets[current.row] + j + 1] = row_nnz[current.row];
-        }
-      }
-
-      for (int j = 0; j < blocks[current.row]; ++j) {
-        for (int k = 0; k < blocks[current.col]; ++k) {
-          crsm_cols[nnz + row_nnz[current.row] * j + col_nnz + k] =
-              block_offsets[current.col] + k;
-        }
-      }
-    }
-
-    (*program)[current.index] = col_nnz;
-  }
-
-  for (int i = 1; i < num_cols + 1; ++i) {
-    crsm_rows[i] += crsm_rows[i - 1];
-  }
-
-  return matrix;
-}
-
-// input is a matrix of dimesion <row_block_size, input_cols>
-// output is a matrix of dimension <col_block1_size, output_cols>
-//
-// Implement block multiplication O = I1' * I2.
-// I1 is block(0, col_block1_begin, row_block_size, col_block1_size) of input
-// I2 is block(0, col_block2_begin, row_block_size, col_block2_size) of input
-// O is block(0, 0, col_block1_size, col_block2_size) of output
-void ComputeBlockMultiplication(const int row_block_size,
-                                const int col_block1_size,
-                                const int col_block2_size,
-                                const int col_block1_begin,
-                                const int col_block2_begin,
-                                const int input_cols,
-                                const double* input,
-                                const int output_cols,
-                                double* output) {
-  for (int r = 0; r < row_block_size; ++r) {
-    for (int idx1 = 0; idx1 < col_block1_size; ++idx1) {
-      for (int idx2 = 0; idx2 < col_block2_size; ++idx2) {
-        output[output_cols * idx1 + idx2] +=
-            input[input_cols * r + col_block1_begin + idx1] *
-            input[input_cols * r + col_block2_begin + idx2];
-      }
-    }
-  }
-}
-}  // namespace
-
-CompressedRowSparseMatrix*
-CompressedRowSparseMatrix::CreateOuterProductMatrixAndProgram(
-    const CompressedRowSparseMatrix& m,
-    const CompressedRowSparseMatrix::StorageType storage_type,
-    vector<int>* program) {
-  CHECK(storage_type == LOWER_TRIANGULAR || storage_type == UPPER_TRIANGULAR);
-  CHECK_NOTNULL(program)->clear();
-  CHECK_GT(m.num_nonzeros(), 0)
-      << "Congratulations, you found a bug in Ceres. Please report it.";
-
-  vector<ProductTerm> product;
-  const vector<int>& col_blocks = m.col_blocks();
-  const vector<int>& crsb_rows = m.crsb_rows();
-  const vector<int>& crsb_cols = m.crsb_cols();
-
-  // Give input matrix m in Compressed Row Sparse Block format
-  //     (row_block, col_block)
-  // represent each block multiplication
-  //     (row_block, col_block1)' X (row_block, col_block2)
-  // by its product term index and sort the product terms
-  //     (col_block1, col_block2, index)
-  //
-  // Due to the compression on rows, col_block is accessed through idx to
-  // crsb_cols.  So col_block is accessed as crsb_cols[idx] in the code.
-  for (int row_block = 1; row_block < crsb_rows.size(); ++row_block) {
-    for (int idx1 = crsb_rows[row_block - 1]; idx1 < crsb_rows[row_block];
-         ++idx1) {
-      if (storage_type == LOWER_TRIANGULAR) {
-        for (int idx2 = crsb_rows[row_block - 1]; idx2 <= idx1; ++idx2) {
-          product.push_back(
-              ProductTerm(crsb_cols[idx1], crsb_cols[idx2], product.size()));
-        }
-      } else {  // Upper triangular matrix.
-        for (int idx2 = idx1; idx2 < crsb_rows[row_block]; ++idx2) {
-          product.push_back(
-              ProductTerm(crsb_cols[idx1], crsb_cols[idx2], product.size()));
-        }
-      }
-    }
-  }
-
-  sort(product.begin(), product.end());
-  return CompressAndFillProgram(
-      m.num_cols(), storage_type, col_blocks, product, program);
-}
-
-// Give input matrix m in Compressed Row Sparse Block format
-//     (row_block, col_block)
-// compute outer product m' * m as sum of block multiplications
-//     (row_block, col_block1)' X (row_block, col_block2)
-//
-// Given row_block of the input matrix m, we use m_row_begin to represent
-// the starting row of the row block and m_row_nnz to represent number of
-// nonzero in each row of the row block, then the rows belonging to
-// the row block can be represented as a dense matrix starting at
-//     m.values() + m.rows()[m_row_begin]
-// with dimension
-//     <m.row_blocks()[row_block], m_row_nnz>
-//
-// Then each input matrix block (row_block, col_block) can be represented as
-// a block of above dense matrix starting at position
-//     (0, m_col_nnz)
-// with size
-//     <m.row_blocks()[row_block], m.col_blocks()[col_block]>
-// where m_col_nnz is the number of nonzero before col_block in each row.
-//
-// The outer product block is represented similarly with m_row_begin,
-// m_row_nnz, m_col_nnz, etc. replaced by row_begin, row_nnz, col_nnz,
-// etc. The difference is, m_row_begin and m_col_nnz is counted
-// during the traverse of block multiplication, while row_begin and
-// col_nnz are got from pre-computed block_offsets and program.
-//
-// Due to the compression on rows, col_block is accessed through
-// idx to crsb_col vector. So col_block is accessed as crsb_col[idx]
-// in the code.
-//
-// Note this function produces a triangular matrix in block unit (i.e.
-// diagonal block is a normal block) instead of standard triangular matrix.
-// So there is no special handling for diagonal blocks.
-void CompressedRowSparseMatrix::ComputeOuterProduct(
-    const CompressedRowSparseMatrix& m,
-    const vector<int>& program,
-    CompressedRowSparseMatrix* result) {
-  CHECK(result->storage_type() == LOWER_TRIANGULAR ||
-        result->storage_type() == UPPER_TRIANGULAR);
-  result->SetZero();
-  double* values = result->mutable_values();
-  const int* rows = result->rows();
-  const vector<int>& block_offsets = result->block_offsets();
-
-  int cursor = 0;
-  const double* m_values = m.values();
-  const int* m_rows = m.rows();
-  const vector<int>& row_blocks = m.row_blocks();
-  const vector<int>& col_blocks = m.col_blocks();
-  const vector<int>& crsb_rows = m.crsb_rows();
-  const vector<int>& crsb_cols = m.crsb_cols();
-  const StorageType storage_type = result->storage_type();
-#define COL_BLOCK1 (crsb_cols[idx1])
-#define COL_BLOCK2 (crsb_cols[idx2])
-
-  // Iterate row blocks.
-  for (int row_block = 0, m_row_begin = 0; row_block < row_blocks.size();
-       m_row_begin += row_blocks[row_block++]) {
-    // Non zeros are not stored consecutively across rows in a block.
-    // The gaps between rows is the number of nonzeros of the
-    // input matrix compressed row.
-    const int m_row_nnz = m_rows[m_row_begin + 1] - m_rows[m_row_begin];
-
-    // Iterate (col_block1 x col_block2).
-    for (int idx1 = crsb_rows[row_block], m_col_nnz1 = 0;
-         idx1 < crsb_rows[row_block + 1];
-         m_col_nnz1 += col_blocks[COL_BLOCK1], ++idx1) {
-      // Non zeros are not stored consecutively across rows in a
-      // block. The gaps between rows is the number of nonzeros of the
-      // outer product matrix compressed row.
-      const int row_begin = block_offsets[COL_BLOCK1];
-      const int row_nnz = rows[row_begin + 1] - rows[row_begin];
-      if (storage_type == LOWER_TRIANGULAR) {
-        for (int idx2 = crsb_rows[row_block], m_col_nnz2 = 0; idx2 <= idx1;
-             m_col_nnz2 += col_blocks[COL_BLOCK2], ++idx2, ++cursor) {
-          int col_nnz = program[cursor];
-          ComputeBlockMultiplication(row_blocks[row_block],
-                                     col_blocks[COL_BLOCK1],
-                                     col_blocks[COL_BLOCK2],
-                                     m_col_nnz1,
-                                     m_col_nnz2,
-                                     m_row_nnz,
-                                     m_values + m_rows[m_row_begin],
-                                     row_nnz,
-                                     values + rows[row_begin] + col_nnz);
-        }
-      } else {
-        for (int idx2 = idx1, m_col_nnz2 = m_col_nnz1;
-             idx2 < crsb_rows[row_block + 1];
-             m_col_nnz2 += col_blocks[COL_BLOCK2], ++idx2, ++cursor) {
-          int col_nnz = program[cursor];
-          ComputeBlockMultiplication(row_blocks[row_block],
-                                     col_blocks[COL_BLOCK1],
-                                     col_blocks[COL_BLOCK2],
-                                     m_col_nnz1,
-                                     m_col_nnz2,
-                                     m_row_nnz,
-                                     m_values + m_rows[m_row_begin],
-                                     row_nnz,
-                                     values + rows[row_begin] + col_nnz);
-        }
-      }
-    }
-  }
-
-#undef COL_BLOCK1
-#undef COL_BLOCK2
-
-  CHECK_EQ(cursor, program.size());
-}
-
 CompressedRowSparseMatrix* CompressedRowSparseMatrix::CreateRandomMatrix(
     const CompressedRowSparseMatrix::RandomMatrixOptions& options) {
   CHECK_GT(options.num_row_blocks, 0);
@@ -924,8 +512,6 @@
     col_blocks.push_back(options.min_col_block_size + delta_block_size);
   }
 
-  vector<int> crsb_rows;
-  vector<int> crsb_cols;
   vector<int> tsm_rows;
   vector<int> tsm_cols;
   vector<double> tsm_values;
@@ -939,8 +525,6 @@
   // not what the user wants, so do the matrix generation till we have
   // at least one non-zero entry.
   while (tsm_values.empty()) {
-    crsb_rows.clear();
-    crsb_cols.clear();
     tsm_rows.clear();
     tsm_cols.clear();
     tsm_values.clear();
@@ -948,7 +532,6 @@
     int row_block_begin = 0;
     for (int r = 0; r < options.num_row_blocks; ++r) {
       int col_block_begin = 0;
-      crsb_rows.push_back(crsb_cols.size());
       for (int c = 0; c < options.num_col_blocks; ++c) {
         // Randomly determine if this block is present or not.
         if (RandDouble() <= options.block_density) {
@@ -959,14 +542,11 @@
                          &tsm_rows,
                          &tsm_cols,
                          &tsm_values);
-          // Add the block to the block sparse structure.
-          crsb_cols.push_back(c);
         }
         col_block_begin += col_blocks[c];
       }
       row_block_begin += row_blocks[r];
     }
-    crsb_rows.push_back(crsb_cols.size());
   }
 
   const int num_rows = std::accumulate(row_blocks.begin(), row_blocks.end(), 0);
@@ -979,8 +559,6 @@
           kDoNotTranspose);
   (*matrix->mutable_row_blocks()) = row_blocks;
   (*matrix->mutable_col_blocks()) = col_blocks;
-  (*matrix->mutable_crsb_rows()) = crsb_rows;
-  (*matrix->mutable_crsb_cols()) = crsb_cols;
   matrix->set_storage_type(CompressedRowSparseMatrix::UNSYMMETRIC);
   return matrix;
 }
diff --git a/internal/ceres/compressed_row_sparse_matrix.h b/internal/ceres/compressed_row_sparse_matrix.h
index 1e26f7c..67b043e 100644
--- a/internal/ceres/compressed_row_sparse_matrix.h
+++ b/internal/ceres/compressed_row_sparse_matrix.h
@@ -154,15 +154,6 @@
   const std::vector<int>& col_blocks() const { return col_blocks_; }
   std::vector<int>* mutable_col_blocks() { return &col_blocks_; }
 
-  const std::vector<int>& block_offsets() const { return block_offsets_; }
-  std::vector<int>* mutable_block_offsets() { return &block_offsets_; }
-
-  const std::vector<int>& crsb_rows() const { return crsb_rows_; }
-  std::vector<int>* mutable_crsb_rows() { return &crsb_rows_; }
-
-  const std::vector<int>& crsb_cols() const { return crsb_cols_; }
-  std::vector<int>* mutable_crsb_cols() { return &crsb_cols_; }
-
   // Create a block diagonal CompressedRowSparseMatrix with the given
   // block structure. The individual blocks are assumed to be laid out
   // contiguously in the diagonal array, one block at a time.
@@ -217,38 +208,7 @@
   static CompressedRowSparseMatrix* CreateRandomMatrix(
       const RandomMatrixOptions& options);
 
-  // Compute the sparsity structure of the product m.transpose() * m
-  // and create a CompressedRowSparseMatrix corresponding to it.
-  //
-  // Also compute a "program" vector, which for every term in the
-  // block outer product provides the information for the entry
-  // in the values array of the result matrix where it should be accumulated.
-  //
-  // This program is used by the ComputeOuterProduct function below to
-  // compute the outer product.
-  //
-  // Since the entries of the program are the same for rows with the
-  // same sparsity structure, the program only stores the result for
-  // one row per row block. The ComputeOuterProduct function reuses
-  // this information for each row in the row block.
-  //
-  // storage_type controls the form of the output matrix. It can be
-  // LOWER_TRIANGULAR or UPPER_TRIANGULAR.
-  static CompressedRowSparseMatrix* CreateOuterProductMatrixAndProgram(
-      const CompressedRowSparseMatrix& m,
-      const StorageType storage_type,
-      std::vector<int>* program);
-
-  // Compute the values array for the expression m.transpose() * m,
-  // where the matrix used to store the result and a program have been
-  // created using the CreateOuterProductMatrixAndProgram function
-  // above.
-  static void ComputeOuterProduct(const CompressedRowSparseMatrix& m,
-                                  const std::vector<int>& program,
-                                  CompressedRowSparseMatrix* result);
-
  private:
-
   static CompressedRowSparseMatrix* FromTripletSparseMatrix(
       const TripletSparseMatrix& input, bool transpose);
 
@@ -266,18 +226,6 @@
   // any way.
   std::vector<int> row_blocks_;
   std::vector<int> col_blocks_;
-
-  // For outer product matrix (J' * J), we pre-compute its block
-  // offsets information here for fast outer product computation in
-  // block unit.  Since the outer product matrix is symmetric, we do
-  // not need to distinguish row or col block. In another word, this
-  // is the prefix sum of row_blocks_/col_blocks_.
-  std::vector<int> block_offsets_;
-
-  // If the matrix has an underlying block structure, then it can also
-  // carry with it compressed row sparse block information.
-  std::vector<int> crsb_rows_;
-  std::vector<int> crsb_cols_;
 };
 
 }  // namespace internal
diff --git a/internal/ceres/compressed_row_sparse_matrix_test.cc b/internal/ceres/compressed_row_sparse_matrix_test.cc
index bb8456e..ab8f4ad 100644
--- a/internal/ceres/compressed_row_sparse_matrix_test.cc
+++ b/internal/ceres/compressed_row_sparse_matrix_test.cc
@@ -90,15 +90,6 @@
     vector<int>* col_blocks = crsm->mutable_col_blocks();
     col_blocks->resize(num_cols);
     std::fill(col_blocks->begin(), col_blocks->end(), 1);
-
-    // With all blocks of size 1, crsb_rows and crsb_cols are equivalent to
-    // rows and cols.
-    std::copy(crsm->rows(),
-              crsm->rows() + crsm->num_rows() + 1,
-              std::back_inserter(*crsm->mutable_crsb_rows()));
-    std::copy(crsm->cols(),
-              crsm->cols() + crsm->num_nonzeros(),
-              std::back_inserter(*crsm->mutable_crsb_cols()));
   }
 
   int num_rows;
@@ -152,8 +143,6 @@
   // Clear the row and column blocks as these are purely scalar tests.
   crsm->mutable_row_blocks()->clear();
   crsm->mutable_col_blocks()->clear();
-  crsm->mutable_crsb_rows()->clear();
-  crsm->mutable_crsb_cols()->clear();
 
   for (int i = 0; i < num_rows; ++i) {
     tsm->Resize(num_rows - i, num_cols);
@@ -166,8 +155,6 @@
   // Clear the row and column blocks as these are purely scalar tests.
   crsm->mutable_row_blocks()->clear();
   crsm->mutable_col_blocks()->clear();
-  crsm->mutable_crsb_rows()->clear();
-  crsm->mutable_crsb_cols()->clear();
 
   for (int i = 0; i < num_rows; ++i) {
     TripletSparseMatrix tsm_appendage(*tsm);
@@ -198,9 +185,6 @@
   const vector<int> pre_row_blocks = crsm->row_blocks();
   const vector<int> pre_col_blocks = crsm->col_blocks();
 
-  const vector<int> pre_crsb_rows = crsm->crsb_rows();
-  const vector<int> pre_crsb_cols = crsm->crsb_cols();
-
   scoped_ptr<CompressedRowSparseMatrix> appendage(
       CompressedRowSparseMatrix::CreateBlockDiagonalMatrix(
           diagonal.get(), row_and_column_blocks));
@@ -221,22 +205,9 @@
   EXPECT_EQ(expected_row_blocks, crsm->row_blocks());
   EXPECT_EQ(expected_col_blocks, crsm->col_blocks());
 
-  EXPECT_EQ(crsm->crsb_cols().size(),
-            pre_crsb_cols.size() + row_and_column_blocks.size());
-  EXPECT_EQ(crsm->crsb_rows().size(),
-            pre_crsb_rows.size() + row_and_column_blocks.size());
-  for (int i = 0; i < row_and_column_blocks.size(); ++i) {
-    EXPECT_EQ(crsm->crsb_rows()[i + pre_crsb_rows.size()],
-              pre_crsb_rows.back() + i + 1);
-    EXPECT_EQ(crsm->crsb_cols()[i + pre_crsb_cols.size()], i);
-  }
-
   crsm->DeleteRows(num_diagonal_rows);
   EXPECT_EQ(crsm->row_blocks(), pre_row_blocks);
   EXPECT_EQ(crsm->col_blocks(), pre_col_blocks);
-
-  EXPECT_EQ(crsm->crsb_rows(), pre_crsb_rows);
-  EXPECT_EQ(crsm->crsb_cols(), pre_crsb_cols);
 }
 
 TEST_F(CompressedRowSparseMatrixTest, ToDenseMatrix) {
@@ -334,14 +305,6 @@
   matrix.mutable_col_blocks()->push_back(4);
   matrix.mutable_col_blocks()->push_back(2);
 
-  matrix.mutable_crsb_rows()->push_back(0);
-  matrix.mutable_crsb_rows()->push_back(2);
-  matrix.mutable_crsb_rows()->push_back(4);
-  matrix.mutable_crsb_cols()->push_back(0);
-  matrix.mutable_crsb_cols()->push_back(1);
-  matrix.mutable_crsb_cols()->push_back(0);
-  matrix.mutable_crsb_cols()->push_back(1);
-
   rows[0] = 0;
   cols[0] = 1;
   cols[1] = 3;
@@ -392,93 +355,6 @@
   EXPECT_NEAR((dense_matrix - dense_transpose.transpose()).norm(), 0.0, 1e-14);
 }
 
-TEST(CompressedRowSparseMatrix, ComputeOuterProduct) {
-  // "Randomly generated seed."
-  SetRandomState(29823);
-  const int kMaxNumRowBlocks = 10;
-  const int kMaxNumColBlocks = 10;
-  const int kNumTrials = 10;
-
-  // Create a random matrix, compute its outer product using Eigen and
-  // ComputeOuterProduct. Convert both matrices to dense matrices and
-  // compare their upper triangular parts.
-  for (int num_row_blocks = 1; num_row_blocks < kMaxNumRowBlocks;
-       ++num_row_blocks) {
-    for (int num_col_blocks = 1; num_col_blocks < kMaxNumColBlocks;
-         ++num_col_blocks) {
-      for (int trial = 0; trial < kNumTrials; ++trial) {
-        CompressedRowSparseMatrix::RandomMatrixOptions options;
-        options.num_row_blocks = num_row_blocks;
-        options.num_col_blocks = num_col_blocks;
-        options.min_row_block_size = 1;
-        options.max_row_block_size = 5;
-        options.min_col_block_size = 1;
-        options.max_col_block_size = 10;
-        options.block_density = std::max(0.1, RandDouble());
-
-        VLOG(2) << "num row blocks: " << options.num_row_blocks;
-        VLOG(2) << "num col blocks: " << options.num_col_blocks;
-        VLOG(2) << "min row block size: " << options.min_row_block_size;
-        VLOG(2) << "max row block size: " << options.max_row_block_size;
-        VLOG(2) << "min col block size: " << options.min_col_block_size;
-        VLOG(2) << "max col block size: " << options.max_col_block_size;
-        VLOG(2) << "block density: " << options.block_density;
-
-        scoped_ptr<CompressedRowSparseMatrix> random_matrix(
-            CompressedRowSparseMatrix::CreateRandomMatrix(options));
-
-        Eigen::MappedSparseMatrix<double, Eigen::RowMajor> mapped_random_matrix(
-            random_matrix->num_rows(),
-            random_matrix->num_cols(),
-            random_matrix->num_nonzeros(),
-            random_matrix->mutable_rows(),
-            random_matrix->mutable_cols(),
-            random_matrix->mutable_values());
-
-        Matrix expected_outer_product =
-            mapped_random_matrix.transpose() * mapped_random_matrix;
-
-        // Use compressed row lower triangular matrix, which will then
-        // get mapped to a compressed column upper triangular matrix.
-        vector<int> program;
-        scoped_ptr<CompressedRowSparseMatrix> outer_product(
-            CompressedRowSparseMatrix::CreateOuterProductMatrixAndProgram(
-                *random_matrix,
-                CompressedRowSparseMatrix::LOWER_TRIANGULAR,
-                &program));
-        CompressedRowSparseMatrix::ComputeOuterProduct(
-            *random_matrix, program, outer_product.get());
-
-        EXPECT_EQ(outer_product->row_blocks(), random_matrix->col_blocks());
-        EXPECT_EQ(outer_product->col_blocks(), random_matrix->col_blocks());
-
-        Matrix actual_outer_product =
-            Eigen::MappedSparseMatrix<double, Eigen::ColMajor>(
-                outer_product->num_rows(),
-                outer_product->num_rows(),
-                outer_product->num_nonzeros(),
-                outer_product->mutable_rows(),
-                outer_product->mutable_cols(),
-                outer_product->mutable_values());
-        expected_outer_product.triangularView<Eigen::StrictlyLower>().setZero();
-        actual_outer_product.triangularView<Eigen::StrictlyLower>().setZero();
-
-        EXPECT_EQ(actual_outer_product.rows(), actual_outer_product.cols());
-        EXPECT_EQ(expected_outer_product.rows(), expected_outer_product.cols());
-        EXPECT_EQ(actual_outer_product.rows(), expected_outer_product.rows());
-
-        const double diff_norm =
-            (actual_outer_product - expected_outer_product).norm() /
-            expected_outer_product.norm();
-        EXPECT_NEAR(diff_norm, 0.0, std::numeric_limits<double>::epsilon())
-            << "expected: \n"
-            << expected_outer_product << "\nactual: \n"
-            << actual_outer_product;
-      }
-    }
-  }
-}
-
 TEST(CompressedRowSparseMatrix, FromTripletSparseMatrix) {
   TripletSparseMatrix::RandomMatrixOptions options;
   options.num_rows = 5;