Refactor CreateRandomCompressedRowSparseMatrix
Move it to compressed_row_sparse_matrix.h/cc for upcoming re-use.
Also clean up the tests for ComputeOuterProduct so that they do
not depend on CXSparse anymore and use Eigen instead. This also
makes the test simpler and shorter.
Change-Id: I06bbeb3b0c6a07fb1f3da354ef0abd17d246be9a
diff --git a/internal/ceres/compressed_row_sparse_matrix.cc b/internal/ceres/compressed_row_sparse_matrix.cc
index 5d56612..68af5f8 100644
--- a/internal/ceres/compressed_row_sparse_matrix.cc
+++ b/internal/ceres/compressed_row_sparse_matrix.cc
@@ -1,5 +1,5 @@
// Ceres Solver - A fast non-linear least squares minimizer
-// Copyright 2015 Google Inc. All rights reserved.
+// Copyright 2017 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
@@ -35,6 +35,7 @@
#include <vector>
#include "ceres/crs_matrix.h"
#include "ceres/internal/port.h"
+#include "ceres/random.h"
#include "ceres/triplet_sparse_matrix.h"
#include "glog/logging.h"
@@ -54,9 +55,7 @@
//
// If this is the case, this functor will not be a StrictWeakOrdering.
struct RowColLessThan {
- RowColLessThan(const int* rows, const int* cols)
- : rows(rows), cols(cols) {
- }
+ RowColLessThan(const int* rows, const int* cols) : rows(rows), cols(cols) {}
bool operator()(const int x, const int y) const {
if (rows[x] == rows[y]) {
@@ -72,12 +71,12 @@
void TransposeForCompressedRowSparseStructure(const int num_rows,
const int num_cols,
const int num_nonzeros,
- const int *rows,
- const int *cols,
- const double *values,
+ const int* rows,
+ const int* cols,
+ const double* values,
int* transpose_rows,
- int *transpose_cols,
- double *transpose_values) {
+ int* transpose_cols,
+ double* transpose_values) {
for (int idx = 0; idx < num_nonzeros; ++idx) {
++transpose_rows[cols[idx] + 1];
}
@@ -92,12 +91,12 @@
const int transpose_idx = transpose_rows[c]++;
transpose_cols[transpose_idx] = r;
if (values) {
- transpose_values[transpose_idx] = values[idx];
+ transpose_values[transpose_idx] = values[idx];
}
}
}
- for (int i = num_cols - 1; i > 0 ; --i) {
+ for (int i = num_cols - 1; i > 0; --i) {
transpose_rows[i] = transpose_rows[i - 1];
}
transpose_rows[0] = 0;
@@ -114,13 +113,11 @@
cols_.resize(max_num_nonzeros, 0);
values_.resize(max_num_nonzeros, 0.0);
-
- VLOG(1) << "# of rows: " << num_rows_
- << " # of columns: " << num_cols_
- << " max_num_nonzeros: " << cols_.size()
- << ". Allocating " << (num_rows_ + 1) * sizeof(int) + // NOLINT
- cols_.size() * sizeof(int) + // NOLINT
- cols_.size() * sizeof(double); // NOLINT
+ VLOG(1) << "# of rows: " << num_rows_ << " # of columns: " << num_cols_
+ << " max_num_nonzeros: " << cols_.size() << ". Allocating "
+ << (num_rows_ + 1) * sizeof(int) + // NOLINT
+ cols_.size() * sizeof(int) + // NOLINT
+ cols_.size() * sizeof(double); // NOLINT
}
CompressedRowSparseMatrix::CompressedRowSparseMatrix(
@@ -142,10 +139,8 @@
// are broken by column.
sort(index.begin(), index.end(), RowColLessThan(m.rows(), m.cols()));
- VLOG(1) << "# of rows: " << num_rows_
- << " # of columns: " << num_cols_
- << " max_num_nonzeros: " << cols_.size()
- << ". Allocating "
+ VLOG(1) << "# of rows: " << num_rows_ << " # of columns: " << num_cols_
+ << " max_num_nonzeros: " << cols_.size() << ". Allocating "
<< ((num_rows_ + 1) * sizeof(int) + // NOLINT
cols_.size() * sizeof(int) + // NOLINT
cols_.size() * sizeof(double)); // NOLINT
@@ -187,8 +182,7 @@
CHECK_EQ(num_nonzeros(), num_rows);
}
-CompressedRowSparseMatrix::~CompressedRowSparseMatrix() {
-}
+CompressedRowSparseMatrix::~CompressedRowSparseMatrix() {}
void CompressedRowSparseMatrix::SetZero() {
std::fill(values_.begin(), values_.end(), 0);
@@ -303,9 +297,8 @@
DCHECK_LT(num_nonzeros(), cols_.size());
if (m.num_nonzeros() > 0) {
std::copy(m.cols(), m.cols() + m.num_nonzeros(), &cols_[num_nonzeros()]);
- std::copy(m.values(),
- m.values() + m.num_nonzeros(),
- &values_[num_nonzeros()]);
+ std::copy(
+ m.values(), m.values() + m.num_nonzeros(), &values_[num_nonzeros()]);
}
rows_.resize(num_rows_ + m.num_rows() + 1);
@@ -330,15 +323,15 @@
CHECK_EQ(crsb_rows_.size(), row_blocks_.size() + 1);
CHECK_EQ(crsb_rows_.back(), crsb_cols_.size());
- row_blocks_.insert(row_blocks_.end(),
- m.row_blocks().begin(),
- m.row_blocks().end());
+ row_blocks_.insert(
+ row_blocks_.end(), m.row_blocks().begin(), m.row_blocks().end());
// The rest of the code update compressed row sparse block (crsb) information.
const int num_crsb_nonzeros = crsb_cols_.size();
const int m_num_crsb_nonzeros = m.crsb_cols_.size();
crsb_cols_.resize(num_crsb_nonzeros + m_num_crsb_nonzeros);
- std::copy(&m.crsb_cols()[0], &m.crsb_cols()[0] + m_num_crsb_nonzeros,
+ std::copy(&m.crsb_cols()[0],
+ &m.crsb_cols()[0] + m_num_crsb_nonzeros,
&crsb_cols_[num_crsb_nonzeros]);
const int num_crsb_rows = crsb_rows_.size() - 1;
@@ -357,11 +350,7 @@
CHECK_NOTNULL(file);
for (int r = 0; r < num_rows_; ++r) {
for (int idx = rows_[r]; idx < rows_[r + 1]; ++idx) {
- fprintf(file,
- "% 10d % 10d %17f\n",
- r,
- cols_[idx],
- values_[idx]);
+ fprintf(file, "% 10d % 10d %17f\n", r, cols_[idx], values_[idx]);
}
}
}
@@ -392,7 +381,7 @@
for (int idx = rows_[r]; idx < rows_[r + 1] - 1; ++idx) {
solution[r] -= values_[idx] * solution[cols_[idx]];
}
- solution[r] /= values_[rows_[r + 1] - 1];
+ solution[r] /= values_[rows_[r + 1] - 1];
}
}
@@ -407,8 +396,7 @@
}
CompressedRowSparseMatrix* CompressedRowSparseMatrix::CreateBlockDiagonalMatrix(
- const double* diagonal,
- const vector<int>& blocks) {
+ const double* diagonal, const vector<int>& blocks) {
int num_rows = 0;
int num_nonzeros = 0;
for (int i = 0; i < blocks.size(); ++i) {
@@ -460,12 +448,15 @@
CompressedRowSparseMatrix* transpose =
new CompressedRowSparseMatrix(num_cols_, num_rows_, num_nonzeros());
- TransposeForCompressedRowSparseStructure(
- num_rows(), num_cols(), num_nonzeros(),
- rows(), cols(), values(),
- transpose->mutable_rows(),
- transpose->mutable_cols(),
- transpose->mutable_values());
+ TransposeForCompressedRowSparseStructure(num_rows(),
+ num_cols(),
+ num_nonzeros(),
+ rows(),
+ cols(),
+ values(),
+ transpose->mutable_rows(),
+ transpose->mutable_cols(),
+ transpose->mutable_values());
// The rest of the code update block information.
// Immediately return in case of no block information.
@@ -488,10 +479,15 @@
std::fill(transpose_crsb_rows.begin(), transpose_crsb_rows.end(), 0);
transpose_crsb_cols.resize(crsb_cols_.size());
- TransposeForCompressedRowSparseStructure(
- row_blocks().size(), col_blocks().size(), crsb_cols().size(),
- crsb_rows().data(), crsb_cols().data(), NULL,
- transpose_crsb_rows.data(), transpose_crsb_cols.data(), NULL);
+ TransposeForCompressedRowSparseStructure(row_blocks().size(),
+ col_blocks().size(),
+ crsb_cols().size(),
+ crsb_rows().data(),
+ crsb_cols().data(),
+ NULL,
+ transpose_crsb_rows.data(),
+ transpose_crsb_cols.data(),
+ NULL);
return transpose;
}
@@ -501,8 +497,7 @@
// itself.
struct ProductTerm {
ProductTerm(const int row, const int col, const int index)
- : row(row), col(col), index(index) {
- }
+ : row(row), col(col), index(index) {}
bool operator<(const ProductTerm& right) const {
if (row == right.row) {
@@ -527,12 +522,11 @@
// round to generate the sparse rows/cols information.
// This function also computes the block offset information for
// the outerproduct matrix, which is used in outer product computation.
-CompressedRowSparseMatrix*
-CreateOuterProductMatrix(const int num_cols,
- const vector<int>& blocks,
- const vector<ProductTerm>& product,
- vector<int>* row_nnz) {
-
+CompressedRowSparseMatrix* CreateOuterProductMatrix(
+ const int num_cols,
+ const vector<int>& blocks,
+ const vector<ProductTerm>& product,
+ vector<int>* row_nnz) {
// Count the number of unique product term, which in turn is the
// number of non-zeros in the outer product.
// Also count the number of non-zeros in each row.
@@ -565,11 +559,11 @@
return matrix;
}
-CompressedRowSparseMatrix*
-CompressAndFillProgram(const int num_cols,
- const vector<int>& blocks,
- const vector<ProductTerm>& product,
- vector<int>* program) {
+CompressedRowSparseMatrix* CompressAndFillProgram(
+ const int num_cols,
+ const vector<int>& blocks,
+ const vector<ProductTerm>& product,
+ vector<int>* program) {
CHECK_GT(product.size(), 0);
vector<int> row_nnz;
@@ -611,11 +605,10 @@
// Process first product term.
for (int j = 0; j < blocks[product[0].row]; ++j) {
- crsm_rows[block_offsets[product[0].row] + j + 1] =
- row_nnz[product[0].row];
+ crsm_rows[block_offsets[product[0].row] + j + 1] = row_nnz[product[0].row];
for (int k = 0; k < blocks[product[0].col]; ++k) {
- crsm_cols[row_nnz[product[0].row] * j + k]
- = block_offsets[product[0].col] + k;
+ crsm_cols[row_nnz[product[0].row] * j + k] =
+ block_offsets[product[0].col] + k;
}
}
@@ -634,15 +627,14 @@
col_nnz = 0;
for (int j = 0; j < blocks[current.row]; ++j) {
- crsm_rows[block_offsets[current.row] + j + 1] =
- row_nnz[current.row];
+ crsm_rows[block_offsets[current.row] + j + 1] = row_nnz[current.row];
}
}
for (int j = 0; j < blocks[current.row]; ++j) {
for (int k = 0; k < blocks[current.col]; ++k) {
- crsm_cols[nnz + row_nnz[current.row] * j + col_nnz + k]
- = block_offsets[current.col] + k;
+ crsm_cols[nnz + row_nnz[current.row] * j + col_nnz + k] =
+ block_offsets[current.col] + k;
}
}
}
@@ -670,9 +662,9 @@
const int col_block1_begin,
const int col_block2_begin,
const int input_cols,
- const double *input,
+ const double* input,
const int output_cols,
- double *output) {
+ double* output) {
for (int r = 0; r < row_block_size; ++r) {
for (int idx1 = 0; idx1 < col_block1_size; ++idx1) {
for (int idx2 = 0; idx2 < col_block2_size; ++idx2) {
@@ -687,13 +679,10 @@
CompressedRowSparseMatrix*
CompressedRowSparseMatrix::CreateOuterProductMatrixAndProgram(
- const CompressedRowSparseMatrix& m,
- const int stype,
- vector<int>* program) {
+ const CompressedRowSparseMatrix& m, const int stype, vector<int>* program) {
CHECK_NOTNULL(program)->clear();
CHECK_GT(m.num_nonzeros(), 0)
- << "Congratulations, "
- << "you found a bug in Ceres. Please report it.";
+ << "Congratulations, you found a bug in Ceres. Please report it.";
vector<ProductTerm> product;
const vector<int>& col_blocks = m.col_blocks();
@@ -712,16 +701,15 @@
for (int row_block = 1; row_block < crsb_rows.size(); ++row_block) {
for (int idx1 = crsb_rows[row_block - 1]; idx1 < crsb_rows[row_block];
++idx1) {
- if (stype > 0) { // Lower triangular matrix.
+ if (stype > 0) { // Lower triangular matrix.
for (int idx2 = crsb_rows[row_block - 1]; idx2 <= idx1; ++idx2) {
- product.push_back(ProductTerm(crsb_cols[idx1], crsb_cols[idx2],
- product.size()));
+ product.push_back(
+ ProductTerm(crsb_cols[idx1], crsb_cols[idx2], product.size()));
}
- }
- else { // Upper triangular matrix.
+ } else { // Upper triangular matrix.
for (int idx2 = idx1; idx2 < crsb_rows[row_block]; ++idx2) {
- product.push_back(ProductTerm(crsb_cols[idx1], crsb_cols[idx2],
- product.size()));
+ product.push_back(
+ ProductTerm(crsb_cols[idx1], crsb_cols[idx2], product.size()));
}
}
}
@@ -791,8 +779,7 @@
// Non zeros are not stored consecutively across rows in a block.
// The gaps between rows is the number of nonzeros of the
// input matrix compressed row.
- const int m_row_nnz =
- m_rows[m_row_begin + 1] - m_rows[m_row_begin];
+ const int m_row_nnz = m_rows[m_row_begin + 1] - m_rows[m_row_begin];
// Iterate (col_block1 x col_block2).
for (int idx1 = crsb_rows[row_block], m_col_nnz1 = 0;
@@ -805,8 +792,7 @@
const int row_nnz = rows[row_begin + 1] - rows[row_begin];
if (stype > 0) { // Lower triangular matrix.
- for (int idx2 = crsb_rows[row_block], m_col_nnz2 = 0;
- idx2 <= idx1;
+ for (int idx2 = crsb_rows[row_block], m_col_nnz2 = 0; idx2 <= idx1;
m_col_nnz2 += col_blocks[COL_BLOCK2], ++idx2, ++cursor) {
int col_nnz = program[cursor];
ComputeBlockMultiplication(row_blocks[row_block],
@@ -819,8 +805,7 @@
row_nnz,
values + rows[row_begin] + col_nnz);
}
- }
- else { // Upper triangular matrix.
+ } else { // Upper triangular matrix.
for (int idx2 = idx1, m_col_nnz2 = m_col_nnz1;
idx2 < crsb_rows[row_block + 1];
m_col_nnz2 += col_blocks[COL_BLOCK2], ++idx2, ++cursor) {
@@ -845,5 +830,87 @@
CHECK_EQ(cursor, program.size());
}
+CompressedRowSparseMatrix* CreateRandomCompressedRowSparseMatrix(
+ const RandomMatrixOptions& options) {
+ vector<int> row_blocks;
+ vector<int> col_blocks;
+
+ // Generate the row block structure.
+ for (int i = 0; i < options.num_row_blocks; ++i) {
+ // Generate a random integer in [min_row_block_size, max_row_block_size]
+ const int delta_block_size =
+ Uniform(options.max_row_block_size - options.min_row_block_size);
+ row_blocks.push_back(options.min_row_block_size + delta_block_size);
+ }
+
+ // Generate the col block structure.
+ for (int i = 0; i < options.num_col_blocks; ++i) {
+    // Generate a random integer in [min_col_block_size, max_col_block_size]
+ const int delta_block_size =
+ Uniform(options.max_col_block_size - options.min_col_block_size);
+ col_blocks.push_back(options.min_col_block_size + delta_block_size);
+ }
+
+ vector<int> crsb_rows;
+ vector<int> crsb_cols;
+ vector<int> tsm_rows;
+ vector<int> tsm_cols;
+ vector<double> tsm_values;
+
+ // For ease of construction, we are going to generate the
+ // CompressedRowSparseMatrix by generating it as a
+ // TripletSparseMatrix and then converting it to a
+ // CompressedRowSparseMatrix.
+
+  // It is possible that the random matrix is empty, which is likely
+ // not what the user wants, so do the matrix generation till we have
+ // at least one non-zero entry.
+ while (tsm_values.size() == 0) {
+ int row_block_begin = 0;
+ crsb_rows.clear();
+ crsb_cols.clear();
+ for (int r = 0; r < options.num_row_blocks; ++r) {
+ int col_block_begin = 0;
+ crsb_rows.push_back(crsb_cols.size());
+ for (int c = 0; c < options.num_col_blocks; ++c) {
+ // Randomly determine if this block is present or not.
+ if (RandDouble() <= options.block_density) {
+ for (int i = 0; i < row_blocks[r]; ++i) {
+ for (int j = 0; j < col_blocks[c]; ++j) {
+ tsm_rows.push_back(row_block_begin + i);
+ tsm_cols.push_back(col_block_begin + j);
+ tsm_values.push_back(RandNormal());
+ }
+ }
+ // Add the block to the block sparse structure.
+ crsb_cols.push_back(c);
+ }
+ col_block_begin += col_blocks[c];
+ }
+ row_block_begin += row_blocks[r];
+ }
+ crsb_rows.push_back(crsb_cols.size());
+ }
+
+ const int num_rows = std::accumulate(row_blocks.begin(), row_blocks.end(), 0);
+ const int num_cols = std::accumulate(col_blocks.begin(), col_blocks.end(), 0);
+ const int num_nonzeros = tsm_values.size();
+
+ // Create a TripletSparseMatrix
+ TripletSparseMatrix tsm(num_rows, num_cols, num_nonzeros);
+ std::copy(tsm_rows.begin(), tsm_rows.end(), tsm.mutable_rows());
+ std::copy(tsm_cols.begin(), tsm_cols.end(), tsm.mutable_cols());
+ std::copy(tsm_values.begin(), tsm_values.end(), tsm.mutable_values());
+ tsm.set_num_nonzeros(num_nonzeros);
+
+ // Convert the TripletSparseMatrix to a CompressedRowSparseMatrix.
+ CompressedRowSparseMatrix* matrix = new CompressedRowSparseMatrix(tsm);
+ (*matrix->mutable_row_blocks()) = row_blocks;
+ (*matrix->mutable_col_blocks()) = col_blocks;
+ (*matrix->mutable_crsb_rows()) = crsb_rows;
+ (*matrix->mutable_crsb_cols()) = crsb_cols;
+ return matrix;
+}
+
} // namespace internal
} // namespace ceres
diff --git a/internal/ceres/compressed_row_sparse_matrix.h b/internal/ceres/compressed_row_sparse_matrix.h
index 7b0d677..0bc5d01 100644
--- a/internal/ceres/compressed_row_sparse_matrix.h
+++ b/internal/ceres/compressed_row_sparse_matrix.h
@@ -202,6 +202,41 @@
CERES_DISALLOW_COPY_AND_ASSIGN(CompressedRowSparseMatrix);
};
+// Options struct to control the generation of random block sparse
+// matrices in compressed row sparse format.
+//
+// The random matrix generation proceeds as follows.
+//
+// First the row and column block structure is determined by
+// generating random row and column block sizes that lie within the
+// given bounds.
+//
+// Then we walk the block structure of the resulting matrix, and with
+// probability block_density determine whether they are structurally
+// zero or not. If the answer is no, then we generate entries for the
+// block which are distributed normally.
+struct RandomMatrixOptions {
+ int num_row_blocks;
+ int min_row_block_size;
+ int max_row_block_size;
+ int num_col_blocks;
+ int min_col_block_size;
+ int max_col_block_size;
+
+ // 0 <= block_density <= 1 is the probability of a block being
+ // present in the matrix. A given random matrix will not have
+ // precisely this density.
+ double block_density;
+};
+
+// Create a random CompressedRowSparseMatrix whose entries are
+// normally distributed and whose structure is determined by
+// RandomMatrixOptions.
+//
+// Caller owns the result.
+CompressedRowSparseMatrix* CreateRandomCompressedRowSparseMatrix(
+ const RandomMatrixOptions& options);
+
} // namespace internal
} // namespace ceres
diff --git a/internal/ceres/compressed_row_sparse_matrix_test.cc b/internal/ceres/compressed_row_sparse_matrix_test.cc
index 18d95e6..b6e732c 100644
--- a/internal/ceres/compressed_row_sparse_matrix_test.cc
+++ b/internal/ceres/compressed_row_sparse_matrix_test.cc
@@ -33,7 +33,6 @@
#include <numeric>
#include "ceres/casts.h"
#include "ceres/crs_matrix.h"
-#include "ceres/cxsparse.h"
#include "ceres/internal/eigen.h"
#include "ceres/internal/scoped_ptr.h"
#include "ceres/linear_least_squares_problems.h"
@@ -42,6 +41,8 @@
#include "glog/logging.h"
#include "gtest/gtest.h"
+#include "Eigen/SparseCore"
+
namespace ceres {
namespace internal {
@@ -445,95 +446,6 @@
EXPECT_NEAR((dense_matrix - dense_transpose.transpose()).norm(), 0.0, 1e-14);
}
-#ifndef CERES_NO_CXSPARSE
-
-struct RandomMatrixOptions {
- int num_row_blocks;
- int min_row_block_size;
- int max_row_block_size;
- int num_col_blocks;
- int min_col_block_size;
- int max_col_block_size;
- double block_density;
-};
-
-CompressedRowSparseMatrix* CreateRandomCompressedRowSparseMatrix(
- const RandomMatrixOptions& options) {
- vector<int> row_blocks;
- for (int i = 0; i < options.num_row_blocks; ++i) {
- const int delta_block_size =
- Uniform(options.max_row_block_size - options.min_row_block_size);
- row_blocks.push_back(options.min_row_block_size + delta_block_size);
- }
-
- vector<int> col_blocks;
- for (int i = 0; i < options.num_col_blocks; ++i) {
- const int delta_block_size =
- Uniform(options.max_col_block_size - options.min_col_block_size);
- col_blocks.push_back(options.min_col_block_size + delta_block_size);
- }
-
- vector<int> rows;
- vector<int> cols;
- vector<double> values;
-
- vector<int> crsb_rows;
- vector<int> crsb_cols;
-
- while (values.size() == 0) {
- int row_block_begin = 0;
- crsb_rows.clear();
- crsb_cols.clear();
- for (int r = 0; r < options.num_row_blocks; ++r) {
- int col_block_begin = 0;
- crsb_rows.push_back(crsb_cols.size());
- for (int c = 0; c < options.num_col_blocks; ++c) {
- if (RandDouble() <= options.block_density) {
- for (int i = 0; i < row_blocks[r]; ++i) {
- for (int j = 0; j < col_blocks[c]; ++j) {
- rows.push_back(row_block_begin + i);
- cols.push_back(col_block_begin + j);
- values.push_back(RandNormal());
- }
- }
- crsb_cols.push_back(c);
- }
- col_block_begin += col_blocks[c];
- }
- row_block_begin += row_blocks[r];
- }
- crsb_rows.push_back(crsb_cols.size());
- }
-
- const int num_rows = std::accumulate(row_blocks.begin(), row_blocks.end(), 0);
- const int num_cols = std::accumulate(col_blocks.begin(), col_blocks.end(), 0);
- const int num_nonzeros = values.size();
-
- TripletSparseMatrix tsm(num_rows, num_cols, num_nonzeros);
- std::copy(rows.begin(), rows.end(), tsm.mutable_rows());
- std::copy(cols.begin(), cols.end(), tsm.mutable_cols());
- std::copy(values.begin(), values.end(), tsm.mutable_values());
- tsm.set_num_nonzeros(num_nonzeros);
- CompressedRowSparseMatrix* matrix = new CompressedRowSparseMatrix(tsm);
- (*matrix->mutable_row_blocks()) = row_blocks;
- (*matrix->mutable_col_blocks()) = col_blocks;
- (*matrix->mutable_crsb_rows()) = crsb_rows;
- (*matrix->mutable_crsb_cols()) = crsb_cols;
- return matrix;
-}
-
-void ToDenseMatrix(const cs_di* matrix, Matrix* dense_matrix) {
- dense_matrix->resize(matrix->m, matrix->n);
- dense_matrix->setZero();
-
- for (int c = 0; c < matrix->n; ++c) {
- for (int idx = matrix->p[c]; idx < matrix->p[c + 1]; ++idx) {
- const int r = matrix->i[idx];
- (*dense_matrix)(r, c) = matrix->x[idx];
- }
- }
-}
-
TEST(CompressedRowSparseMatrix, ComputeOuterProduct) {
// "Randomly generated seed."
SetRandomState(29823);
@@ -541,13 +453,9 @@
int kMaxNumColBlocks = 10;
int kNumTrials = 10;
- CXSparse cxsparse;
- const double kTolerance = 1e-18;
-
- // Create a random matrix, compute its outer product using CXSParse
- // and ComputeOuterProduct. Convert both matrices to dense matrices
- // and compare their upper triangular parts. They should be within
- // kTolerance of each other.
+ // Create a random matrix, compute its outer product using Eigen and
+ // ComputeOuterProduct. Convert both matrices to dense matrices and
+ // compare their upper triangular parts.
for (int num_row_blocks = 1;
num_row_blocks < kMaxNumRowBlocks;
++num_row_blocks) {
@@ -572,57 +480,57 @@
VLOG(2) << "max col block size: " << options.max_col_block_size;
VLOG(2) << "block density: " << options.block_density;
- scoped_ptr<CompressedRowSparseMatrix> matrix(
+ scoped_ptr<CompressedRowSparseMatrix> random_matrix(
CreateRandomCompressedRowSparseMatrix(options));
- cs_di cs_matrix_transpose =
- cxsparse.CreateSparseMatrixTransposeView(matrix.get());
- cs_di* cs_matrix = cxsparse.TransposeMatrix(&cs_matrix_transpose);
- cs_di* expected_outer_product =
- cxsparse.MatrixMatrixMultiply(&cs_matrix_transpose, cs_matrix);
+ Eigen::MappedSparseMatrix<double, Eigen::RowMajor> mapped_random_matrix(
+ random_matrix->num_rows(),
+ random_matrix->num_cols(),
+ random_matrix->num_nonzeros(),
+ random_matrix->mutable_rows(),
+ random_matrix->mutable_cols(),
+ random_matrix->mutable_values());
- // Use compressed row lower triangular matrix for cxsparse.
+ Matrix expected_outer_product =
+ mapped_random_matrix.transpose() * mapped_random_matrix;
+
+ // Use compressed row lower triangular matrix.
const int stype = 1;
vector<int> program;
scoped_ptr<CompressedRowSparseMatrix> outer_product(
CompressedRowSparseMatrix::CreateOuterProductMatrixAndProgram(
- *matrix, stype, &program));
- CompressedRowSparseMatrix::ComputeOuterProduct(*matrix,
+ *random_matrix, stype, &program));
+ CompressedRowSparseMatrix::ComputeOuterProduct(*random_matrix,
stype,
program,
outer_product.get());
- cs_di actual_outer_product =
- cxsparse.CreateSparseMatrixTransposeView(outer_product.get());
+ Matrix actual_outer_product =
+ Eigen::MappedSparseMatrix<double, Eigen::ColMajor>(
+ outer_product->num_rows(),
+ outer_product->num_rows(),
+ outer_product->num_nonzeros(),
+ outer_product->mutable_rows(),
+ outer_product->mutable_cols(),
+ outer_product->mutable_values());
+ expected_outer_product.triangularView<Eigen::StrictlyLower>().setZero();
+ actual_outer_product.triangularView<Eigen::StrictlyLower>().setZero();
- ASSERT_EQ(actual_outer_product.m, actual_outer_product.n);
- ASSERT_EQ(expected_outer_product->m, expected_outer_product->n);
- ASSERT_EQ(actual_outer_product.m, expected_outer_product->m);
+ EXPECT_EQ(actual_outer_product.rows(), actual_outer_product.cols());
+ EXPECT_EQ(expected_outer_product.rows(), expected_outer_product.cols());
+ EXPECT_EQ(actual_outer_product.rows(), expected_outer_product.rows());
- Matrix actual_matrix;
- Matrix expected_matrix;
-
- ToDenseMatrix(expected_outer_product, &expected_matrix);
- expected_matrix.triangularView<Eigen::StrictlyLower>().setZero();
-
- ToDenseMatrix(&actual_outer_product, &actual_matrix);
- actual_matrix.triangularView<Eigen::StrictlyLower>().setZero();
const double diff_norm =
- (actual_matrix - expected_matrix).norm() / expected_matrix.norm();
- ASSERT_NEAR(diff_norm, 0.0, kTolerance)
+ (actual_outer_product - expected_outer_product).norm() /
+ expected_outer_product.norm();
+ EXPECT_NEAR(diff_norm, 0.0, std::numeric_limits<double>::epsilon())
<< "expected: \n"
- << expected_matrix
- << "\nactual: \n"
- << actual_matrix;
-
- cxsparse.Free(cs_matrix);
- cxsparse.Free(expected_outer_product);
+ << expected_outer_product << "\nactual: \n"
+ << actual_outer_product;
}
}
}
}
-#endif // CERES_NO_CXSPARSE
-
} // namespace internal
} // namespace ceres