Incomplete LQ Factorization.
Drop protocol buffer support from CompressedRowSparseMatrix.
Add CompressedRowSparseMatrix::CreateBlockDiagonalMatrix.
Add CompressedRowSparseMatrix::SolveLowerTriangularInPlace.
Add CompressedRowSparseMatrix::SolveLowerTriangularTransposeInPlace.
Add CompressedRowSparseMatrix::Transpose.
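
For reference, a rough usage sketch of the new CompressedRowSparseMatrix
API and of IncompleteLQFactorization (illustrative only, not part of this
change; the block sizes, diagonal values, fill levels and the wrapping
Example() function are made up, and the calls follow the pattern of the
unit tests added below):

  #include <vector>
  #include "ceres/compressed_row_sparse_matrix.h"
  #include "ceres/incomplete_lq_factorization.h"
  #include "ceres/internal/scoped_ptr.h"

  void Example() {
    using ceres::internal::CompressedRowSparseMatrix;
    using ceres::internal::IncompleteLQFactorization;
    using ceres::internal::scoped_ptr;

    // A 3x3 diagonal matrix built from three 1x1 blocks. Because every
    // block is 1x1, the result is also lower triangular with the
    // diagonal stored as the last entry of each row.
    std::vector<int> blocks(3, 1);
    const double diagonal[] = {1.0, 2.0, 4.0};
    scoped_ptr<CompressedRowSparseMatrix> d(
        CompressedRowSparseMatrix::CreateBlockDiagonalMatrix(diagonal,
                                                             blocks));

    // Transpose() allocates a new matrix owned by the caller.
    scoped_ptr<CompressedRowSparseMatrix> dt(d->Transpose());

    // Solve D x = b and D' x = b in place.
    double x1[] = {1.0, 1.0, 1.0};
    d->SolveLowerTriangularInPlace(x1);
    double x2[] = {1.0, 1.0, 1.0};
    d->SolveLowerTriangularTransposeInPlace(x2);

    // Incomplete LQ factorization of D; the caller owns the returned
    // L factor.
    scoped_ptr<CompressedRowSparseMatrix> l(
        IncompleteLQFactorization(*d, 3, 0.0, 3, 0.0));
  }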
Change-Id: I2328afca9fac632685eac72ebb00998bd3510187
diff --git a/internal/ceres/CMakeLists.txt b/internal/ceres/CMakeLists.txt
index 20b28b4..f77c066 100644
--- a/internal/ceres/CMakeLists.txt
+++ b/internal/ceres/CMakeLists.txt
@@ -61,6 +61,7 @@
file.cc
gradient_checking_cost_function.cc
implicit_schur_complement.cc
+ incomplete_lq_factorization.cc
iterative_schur_complement_solver.cc
levenberg_marquardt_strategy.cc
line_search.cc
@@ -263,6 +264,7 @@
CERES_TEST(graph)
CERES_TEST(graph_algorithms)
CERES_TEST(implicit_schur_complement)
+ CERES_TEST(incomplete_lq_factorization)
CERES_TEST(iterative_schur_complement_solver)
CERES_TEST(jet)
CERES_TEST(levenberg_marquardt_strategy)
diff --git a/internal/ceres/compressed_row_sparse_matrix.cc b/internal/ceres/compressed_row_sparse_matrix.cc
index 1b61468..9fb7bd7 100644
--- a/internal/ceres/compressed_row_sparse_matrix.cc
+++ b/internal/ceres/compressed_row_sparse_matrix.cc
@@ -34,7 +34,8 @@
#include <vector>
#include "ceres/crs_matrix.h"
#include "ceres/internal/port.h"
-#include "ceres/matrix_proto.h"
+#include "ceres/triplet_sparse_matrix.h"
+#include "glog/logging.h"
namespace ceres {
namespace internal {
@@ -72,28 +73,27 @@
int max_num_nonzeros) {
num_rows_ = num_rows;
num_cols_ = num_cols;
- max_num_nonzeros_ = max_num_nonzeros;
+ rows_.resize(num_rows + 1, 0);
+ cols_.resize(max_num_nonzeros, 0);
+ values_.resize(max_num_nonzeros, 0.0);
- VLOG(1) << "# of rows: " << num_rows_ << " # of columns: " << num_cols_
- << " max_num_nonzeros: " << max_num_nonzeros_
+
+ VLOG(1) << "# of rows: " << num_rows_
+ << " # of columns: " << num_cols_
+ << " max_num_nonzeros: " << cols_.size()
<< ". Allocating " << (num_rows_ + 1) * sizeof(int) + // NOLINT
- max_num_nonzeros_ * sizeof(int) + // NOLINT
- max_num_nonzeros_ * sizeof(double); // NOLINT
-
- rows_.reset(new int[num_rows_ + 1]);
- cols_.reset(new int[max_num_nonzeros_]);
- values_.reset(new double[max_num_nonzeros_]);
-
- fill(rows_.get(), rows_.get() + num_rows_ + 1, 0);
- fill(cols_.get(), cols_.get() + max_num_nonzeros_, 0);
- fill(values_.get(), values_.get() + max_num_nonzeros_, 0);
+ cols_.size() * sizeof(int) + // NOLINT
+ cols_.size() * sizeof(double); // NOLINT
}
CompressedRowSparseMatrix::CompressedRowSparseMatrix(
const TripletSparseMatrix& m) {
num_rows_ = m.num_rows();
num_cols_ = m.num_cols();
- max_num_nonzeros_ = m.max_num_nonzeros();
+
+ rows_.resize(num_rows_ + 1, 0);
+ cols_.resize(m.num_nonzeros(), 0);
+ values_.resize(m.max_num_nonzeros(), 0.0);
// index is the list of indices into the TripletSparseMatrix m.
vector<int> index(m.num_nonzeros(), 0);
@@ -105,18 +105,13 @@
// are broken by column.
sort(index.begin(), index.end(), RowColLessThan(m.rows(), m.cols()));
- VLOG(1) << "# of rows: " << num_rows_ << " # of columns: " << num_cols_
- << " max_num_nonzeros: " << max_num_nonzeros_
- << ". Allocating " << (num_rows_ + 1) * sizeof(int) + // NOLINT
- max_num_nonzeros_ * sizeof(int) + // NOLINT
- max_num_nonzeros_ * sizeof(double); // NOLINT
-
- rows_.reset(new int[num_rows_ + 1]);
- cols_.reset(new int[max_num_nonzeros_]);
- values_.reset(new double[max_num_nonzeros_]);
-
- // rows_ = 0
- fill(rows_.get(), rows_.get() + num_rows_ + 1, 0);
+ VLOG(1) << "# of rows: " << num_rows_
+ << " # of columns: " << num_cols_
+ << " max_num_nonzeros: " << cols_.size()
+ << ". Allocating "
+ << ((num_rows_ + 1) * sizeof(int) + // NOLINT
+ cols_.size() * sizeof(int) + // NOLINT
+ cols_.size() * sizeof(double)); // NOLINT
// Copy the contents of the cols and values array in the order given
// by index and count the number of entries in each row.
@@ -135,49 +130,15 @@
CHECK_EQ(num_nonzeros(), m.num_nonzeros());
}
-#ifndef CERES_NO_PROTOCOL_BUFFERS
-CompressedRowSparseMatrix::CompressedRowSparseMatrix(
- const SparseMatrixProto& outer_proto) {
- CHECK(outer_proto.has_compressed_row_matrix());
-
- const CompressedRowSparseMatrixProto& proto =
- outer_proto.compressed_row_matrix();
-
- num_rows_ = proto.num_rows();
- num_cols_ = proto.num_cols();
-
- rows_.reset(new int[proto.rows_size()]);
- cols_.reset(new int[proto.cols_size()]);
- values_.reset(new double[proto.values_size()]);
-
- for (int i = 0; i < proto.rows_size(); ++i) {
- rows_[i] = proto.rows(i);
- }
-
- CHECK_EQ(proto.rows_size(), num_rows_ + 1);
- CHECK_EQ(proto.cols_size(), proto.values_size());
- CHECK_EQ(proto.cols_size(), rows_[num_rows_]);
-
- for (int i = 0; i < proto.cols_size(); ++i) {
- cols_[i] = proto.cols(i);
- values_[i] = proto.values(i);
- }
-
- max_num_nonzeros_ = proto.cols_size();
-}
-#endif
-
CompressedRowSparseMatrix::CompressedRowSparseMatrix(const double* diagonal,
int num_rows) {
CHECK_NOTNULL(diagonal);
num_rows_ = num_rows;
num_cols_ = num_rows;
- max_num_nonzeros_ = num_rows;
-
- rows_.reset(new int[num_rows_ + 1]);
- cols_.reset(new int[num_rows_]);
- values_.reset(new double[num_rows_]);
+ rows_.resize(num_rows + 1);
+ cols_.resize(num_rows);
+ values_.resize(num_rows);
rows_[0] = 0;
for (int i = 0; i < num_rows_; ++i) {
@@ -193,7 +154,7 @@
}
void CompressedRowSparseMatrix::SetZero() {
- fill(values_.get(), values_.get() + num_nonzeros(), 0.0);
+ fill(values_.begin(), values_.end(), 0);
}
void CompressedRowSparseMatrix::RightMultiply(const double* x,
@@ -248,83 +209,35 @@
}
}
-#ifndef CERES_NO_PROTOCOL_BUFFERS
-void CompressedRowSparseMatrix::ToProto(SparseMatrixProto* outer_proto) const {
- CHECK_NOTNULL(outer_proto);
-
- outer_proto->Clear();
- CompressedRowSparseMatrixProto* proto
- = outer_proto->mutable_compressed_row_matrix();
-
- proto->set_num_rows(num_rows_);
- proto->set_num_cols(num_cols_);
-
- for (int r = 0; r < num_rows_ + 1; ++r) {
- proto->add_rows(rows_[r]);
- }
-
- for (int idx = 0; idx < rows_[num_rows_]; ++idx) {
- proto->add_cols(cols_[idx]);
- proto->add_values(values_[idx]);
- }
-}
-#endif
-
void CompressedRowSparseMatrix::DeleteRows(int delta_rows) {
CHECK_GE(delta_rows, 0);
CHECK_LE(delta_rows, num_rows_);
- int new_num_rows = num_rows_ - delta_rows;
-
- num_rows_ = new_num_rows;
- int* new_rows = new int[num_rows_ + 1];
- copy(rows_.get(), rows_.get() + num_rows_ + 1, new_rows);
- rows_.reset(new_rows);
+ num_rows_ -= delta_rows;
+ rows_.resize(num_rows_ + 1);
}
void CompressedRowSparseMatrix::AppendRows(const CompressedRowSparseMatrix& m) {
CHECK_EQ(m.num_cols(), num_cols_);
- // Check if there is enough space. If not, then allocate new arrays
- // to hold the combined matrix and copy the contents of this matrix
- // into it.
- if (max_num_nonzeros_ < num_nonzeros() + m.num_nonzeros()) {
- int new_max_num_nonzeros = num_nonzeros() + m.num_nonzeros();
-
- VLOG(1) << "Reallocating " << sizeof(int) * new_max_num_nonzeros; // NOLINT
-
- int* new_cols = new int[new_max_num_nonzeros];
- copy(cols_.get(), cols_.get() + max_num_nonzeros_, new_cols);
- cols_.reset(new_cols);
-
- double* new_values = new double[new_max_num_nonzeros];
- copy(values_.get(), values_.get() + max_num_nonzeros_, new_values);
- values_.reset(new_values);
-
- max_num_nonzeros_ = new_max_num_nonzeros;
+ if (cols_.size() < num_nonzeros() + m.num_nonzeros()) {
+ cols_.resize(num_nonzeros() + m.num_nonzeros());
+ values_.resize(num_nonzeros() + m.num_nonzeros());
}
// Copy the contents of m into this matrix.
- copy(m.cols(), m.cols() + m.num_nonzeros(), cols_.get() + num_nonzeros());
- copy(m.values(),
- m.values() + m.num_nonzeros(),
- values_.get() + num_nonzeros());
-
- // Create the new rows array to hold the enlarged matrix.
- int* new_rows = new int[num_rows_ + m.num_rows() + 1];
- // The first num_rows_ entries are the same
- copy(rows_.get(), rows_.get() + num_rows_, new_rows);
-
+ copy(m.cols(), m.cols() + m.num_nonzeros(), &cols_[num_nonzeros()]);
+ copy(m.values(), m.values() + m.num_nonzeros(), &values_[num_nonzeros()]);
+ rows_.resize(num_rows_ + m.num_rows() + 1);
// new_rows = [rows_, m.row() + rows_[num_rows_]]
- fill(new_rows + num_rows_,
- new_rows + num_rows_ + m.num_rows() + 1,
+ fill(rows_.begin() + num_rows_,
+ rows_.begin() + num_rows_ + m.num_rows() + 1,
rows_[num_rows_]);
for (int r = 0; r < m.num_rows() + 1; ++r) {
- new_rows[num_rows_ + r] += m.rows()[r];
+ rows_[num_rows_ + r] += m.rows()[r];
}
- rows_.reset(new_rows);
num_rows_ += m.num_rows();
}
@@ -332,23 +245,122 @@
CHECK_NOTNULL(file);
for (int r = 0; r < num_rows_; ++r) {
for (int idx = rows_[r]; idx < rows_[r + 1]; ++idx) {
- fprintf(file, "% 10d % 10d %17f\n", r, cols_[idx], values_[idx]);
+ fprintf(file,
+ "% 10d % 10d %17f\n",
+ r,
+ cols_[idx],
+ values_[idx]);
}
}
}
void CompressedRowSparseMatrix::ToCRSMatrix(CRSMatrix* matrix) const {
- matrix->num_rows = num_rows();
- matrix->num_cols = num_cols();
+ matrix->num_rows = num_rows_;
+ matrix->num_cols = num_cols_;
+ matrix->rows = rows_;
+ matrix->cols = cols_;
+ matrix->values = values_;
+ // Trim.
matrix->rows.resize(matrix->num_rows + 1);
- matrix->cols.resize(num_nonzeros());
- matrix->values.resize(num_nonzeros());
-
- copy(rows_.get(), rows_.get() + matrix->num_rows + 1, matrix->rows.begin());
- copy(cols_.get(), cols_.get() + num_nonzeros(), matrix->cols.begin());
- copy(values_.get(), values_.get() + num_nonzeros(), matrix->values.begin());
+ matrix->cols.resize(matrix->rows[matrix->num_rows]);
+ matrix->values.resize(matrix->rows[matrix->num_rows]);
}
+void CompressedRowSparseMatrix::SolveLowerTriangularInPlace(
+ double* solution) const {
+ for (int r = 0; r < num_rows_; ++r) {
+ for (int idx = rows_[r]; idx < rows_[r + 1] - 1; ++idx) {
+ solution[r] -= values_[idx] * solution[cols_[idx]];
+ }
+ solution[r] /= values_[rows_[r + 1] - 1];
+ }
+}
+
+void CompressedRowSparseMatrix::SolveLowerTriangularTransposeInPlace(
+ double* solution) const {
+ for (int r = num_rows_ - 1; r >= 0; --r) {
+ solution[r] /= values_[rows_[r + 1] - 1];
+ for (int idx = rows_[r + 1] - 2; idx >= rows_[r]; --idx) {
+ solution[cols_[idx]] -= values_[idx] * solution[r];
+ }
+ }
+}
+
+CompressedRowSparseMatrix* CompressedRowSparseMatrix::CreateBlockDiagonalMatrix(
+ const double* diagonal,
+ const vector<int>& blocks) {
+ int num_rows = 0;
+ int num_nonzeros = 0;
+ for (int i = 0; i < blocks.size(); ++i) {
+ num_rows += blocks[i];
+ num_nonzeros += blocks[i] * blocks[i];
+ }
+
+ CompressedRowSparseMatrix* matrix =
+ new CompressedRowSparseMatrix(num_rows, num_rows, num_nonzeros);
+
+ int* rows = matrix->mutable_rows();
+ int* cols = matrix->mutable_cols();
+ double* values = matrix->mutable_values();
+ fill(values, values + num_nonzeros, 0.0);
+
+ int idx_cursor = 0;
+ int col_cursor = 0;
+ for (int i = 0; i < blocks.size(); ++i) {
+ const int block_size = blocks[i];
+ for (int r = 0; r < block_size; ++r) {
+ *(rows++) = idx_cursor;
+ values[idx_cursor + r] = diagonal[col_cursor + r];
+ for (int c = 0; c < block_size; ++c, ++idx_cursor) {
+ *(cols++) = col_cursor + c;
+ }
+ }
+ col_cursor += block_size;
+ }
+ *rows = idx_cursor;
+
+ *matrix->mutable_row_blocks() = blocks;
+ *matrix->mutable_col_blocks() = blocks;
+
+ CHECK_EQ(idx_cursor, num_nonzeros);
+ CHECK_EQ(col_cursor, num_rows);
+ return matrix;
+}
+
+CompressedRowSparseMatrix* CompressedRowSparseMatrix::Transpose() const {
+ CompressedRowSparseMatrix* transpose =
+ new CompressedRowSparseMatrix(num_cols_, num_rows_, num_nonzeros());
+
+ int* transpose_rows = transpose->mutable_rows();
+ int* transpose_cols = transpose->mutable_cols();
+ double* transpose_values = transpose->mutable_values();
+
+ for (int idx = 0; idx < num_nonzeros(); ++idx) {
+ ++transpose_rows[cols_[idx] + 1];
+ }
+
+ for (int i = 1; i < transpose->num_rows() + 1; ++i) {
+ transpose_rows[i] += transpose_rows[i - 1];
+ }
+
+ for (int r = 0; r < num_rows(); ++r) {
+ for (int idx = rows_[r]; idx < rows_[r + 1]; ++idx) {
+ const int c = cols_[idx];
+ const int transpose_idx = transpose_rows[c]++;
+ transpose_cols[transpose_idx] = r;
+ transpose_values[transpose_idx] = values_[idx];
+ }
+ }
+
+ for (int i = transpose->num_rows() - 1; i > 0 ; --i) {
+ transpose_rows[i] = transpose_rows[i - 1];
+ }
+ transpose_rows[0] = 0;
+
+ return transpose;
+}
+
+
} // namespace internal
} // namespace ceres
diff --git a/internal/ceres/compressed_row_sparse_matrix.h b/internal/ceres/compressed_row_sparse_matrix.h
index c9c904b..d65d5c7 100644
--- a/internal/ceres/compressed_row_sparse_matrix.h
+++ b/internal/ceres/compressed_row_sparse_matrix.h
@@ -32,14 +32,10 @@
#define CERES_INTERNAL_COMPRESSED_ROW_SPARSE_MATRIX_H_
#include <vector>
-
-#include "ceres/internal/eigen.h"
#include "ceres/internal/macros.h"
#include "ceres/internal/port.h"
#include "ceres/sparse_matrix.h"
-#include "ceres/triplet_sparse_matrix.h"
#include "ceres/types.h"
-#include "glog/logging.h"
namespace ceres {
@@ -47,7 +43,7 @@
namespace internal {
-class SparseMatrixProto;
+class TripletSparseMatrix;
class CompressedRowSparseMatrix : public SparseMatrix {
public:
@@ -58,9 +54,6 @@
//
// We assume that m does not have any repeated entries.
explicit CompressedRowSparseMatrix(const TripletSparseMatrix& m);
-#ifndef CERES_NO_PROTOCOL_BUFFERS
- explicit CompressedRowSparseMatrix(const SparseMatrixProto& proto);
-#endif
// Use this constructor only if you know what you are doing. This
// creates a "blank" matrix with the appropriate amount of memory
@@ -91,15 +84,12 @@
virtual void ScaleColumns(const double* scale);
virtual void ToDenseMatrix(Matrix* dense_matrix) const;
-#ifndef CERES_NO_PROTOCOL_BUFFERS
- virtual void ToProto(SparseMatrixProto* proto) const;
-#endif
virtual void ToTextFile(FILE* file) const;
virtual int num_rows() const { return num_rows_; }
virtual int num_cols() const { return num_cols_; }
virtual int num_nonzeros() const { return rows_[num_rows_]; }
- virtual const double* values() const { return values_.get(); }
- virtual double* mutable_values() { return values_.get(); }
+ virtual const double* values() const { return &values_[0]; }
+ virtual double* mutable_values() { return &values_[0]; }
// Delete the bottom delta_rows.
// num_rows -= delta_rows
@@ -112,11 +102,11 @@
void ToCRSMatrix(CRSMatrix* matrix) const;
// Low level access methods that expose the structure of the matrix.
- const int* cols() const { return cols_.get(); }
- int* mutable_cols() { return cols_.get(); }
+ const int* cols() const { return &cols_[0]; }
+ int* mutable_cols() { return &cols_[0]; }
- const int* rows() const { return rows_.get(); }
- int* mutable_rows() { return rows_.get(); }
+ const int* rows() const { return &rows_[0]; }
+ int* mutable_rows() { return &rows_[0]; }
const vector<int>& row_blocks() const { return row_blocks_; }
vector<int>* mutable_row_blocks() { return &row_blocks_; }
@@ -124,14 +114,25 @@
const vector<int>& col_blocks() const { return col_blocks_; }
vector<int>* mutable_col_blocks() { return &col_blocks_; }
- private:
- scoped_array<int> cols_;
- scoped_array<int> rows_;
- scoped_array<double> values_;
+ // Non-destructive resizing: changes the logical size, not the arrays.
+ void set_num_rows(const int num_rows) { num_rows_ = num_rows; }
+ void set_num_cols(const int num_cols) { num_cols_ = num_cols; }
+ void SolveLowerTriangularInPlace(double* solution) const;
+ void SolveLowerTriangularTransposeInPlace(double* solution) const;
+
+ CompressedRowSparseMatrix* Transpose() const;
+
+ static CompressedRowSparseMatrix* CreateBlockDiagonalMatrix(
+ const double* diagonal,
+ const vector<int>& blocks);
+
+ private:
int num_rows_;
int num_cols_;
- int max_num_nonzeros_;
+ vector<int> rows_;
+ vector<int> cols_;
+ vector<double> values_;
// If the matrix has an underlying block structure, then it can also
// carry with it row and column block sizes. This is auxilliary and
diff --git a/internal/ceres/compressed_row_sparse_matrix_test.cc b/internal/ceres/compressed_row_sparse_matrix_test.cc
index c9c3f14..63b66a0 100644
--- a/internal/ceres/compressed_row_sparse_matrix_test.cc
+++ b/internal/ceres/compressed_row_sparse_matrix_test.cc
@@ -35,8 +35,8 @@
#include "ceres/internal/eigen.h"
#include "ceres/internal/scoped_ptr.h"
#include "ceres/linear_least_squares_problems.h"
-#include "ceres/matrix_proto.h"
#include "ceres/triplet_sparse_matrix.h"
+#include "glog/logging.h"
#include "gtest/gtest.h"
namespace ceres {
@@ -146,30 +146,6 @@
}
}
-#ifndef CERES_NO_PROTOCOL_BUFFERS
-TEST_F(CompressedRowSparseMatrixTest, Serialization) {
- SparseMatrixProto proto;
- crsm->ToProto(&proto);
-
- CompressedRowSparseMatrix n(proto);
- ASSERT_EQ(n.num_rows(), crsm->num_rows());
- ASSERT_EQ(n.num_cols(), crsm->num_cols());
- ASSERT_EQ(n.num_nonzeros(), crsm->num_nonzeros());
-
- for (int i = 0; i < n.num_rows() + 1; ++i) {
- ASSERT_EQ(crsm->rows()[i], proto.compressed_row_matrix().rows(i));
- ASSERT_EQ(crsm->rows()[i], n.rows()[i]);
- }
-
- for (int i = 0; i < crsm->num_nonzeros(); ++i) {
- ASSERT_EQ(crsm->cols()[i], proto.compressed_row_matrix().cols(i));
- ASSERT_EQ(crsm->cols()[i], n.cols()[i]);
- ASSERT_EQ(crsm->values()[i], proto.compressed_row_matrix().values(i));
- ASSERT_EQ(crsm->values()[i], n.values()[i]);
- }
-}
-#endif
-
TEST_F(CompressedRowSparseMatrixTest, ToDenseMatrix) {
Matrix tsm_dense;
Matrix crsm_dense;
@@ -199,5 +175,155 @@
}
}
+TEST(CompressedRowSparseMatrix, CreateBlockDiagonalMatrix) {
+ vector<int> blocks;
+ blocks.push_back(1);
+ blocks.push_back(2);
+ blocks.push_back(2);
+
+ Vector diagonal(5);
+ for (int i = 0; i < 5; ++i) {
+ diagonal(i) = i + 1;
+ }
+
+ scoped_ptr<CompressedRowSparseMatrix> matrix(
+ CompressedRowSparseMatrix::CreateBlockDiagonalMatrix(
+ diagonal.data(), blocks));
+
+ EXPECT_EQ(matrix->num_rows(), 5);
+ EXPECT_EQ(matrix->num_cols(), 5);
+ EXPECT_EQ(matrix->num_nonzeros(), 9);
+ EXPECT_EQ(blocks, matrix->row_blocks());
+ EXPECT_EQ(blocks, matrix->col_blocks());
+
+ Vector x(5);
+ Vector y(5);
+
+ x.setOnes();
+ y.setZero();
+ matrix->RightMultiply(x.data(), y.data());
+ for (int i = 0; i < diagonal.size(); ++i) {
+ EXPECT_EQ(y[i], diagonal[i]);
+ }
+
+ y.setZero();
+ matrix->LeftMultiply(x.data(), y.data());
+ for (int i = 0; i < diagonal.size(); ++i) {
+ EXPECT_EQ(y[i], diagonal[i]);
+ }
+
+ Matrix dense;
+ matrix->ToDenseMatrix(&dense);
+ EXPECT_EQ((dense.diagonal() - diagonal).norm(), 0.0);
+}
+
+class SolveLowerTriangularTest : public ::testing::Test {
+ protected:
+ void SetUp() {
+ matrix_.reset(new CompressedRowSparseMatrix(4, 4, 7));
+ int* rows = matrix_->mutable_rows();
+ int* cols = matrix_->mutable_cols();
+ double* values = matrix_->mutable_values();
+
+ rows[0] = 0;
+ cols[0] = 0;
+ values[0] = 0.50754;
+
+ rows[1] = 1;
+ cols[1] = 1;
+ values[1] = 0.80483;
+
+ rows[2] = 2;
+ cols[2] = 1;
+ values[2] = 0.14120;
+ cols[3] = 2;
+ values[3] = 0.3;
+
+ rows[3] = 4;
+ cols[4] = 0;
+ values[4] = 0.77696;
+ cols[5] = 1;
+ values[5] = 0.41860;
+ cols[6] = 3;
+ values[6] = 0.88979;
+
+ rows[4] = 7;
+ }
+
+ scoped_ptr<CompressedRowSparseMatrix> matrix_;
+};
+
+TEST_F(SolveLowerTriangularTest, SolveInPlace) {
+ double rhs_and_solution[] = {1.0, 1.0, 2.0, 2.0};
+ double expected[] = {1.970288, 1.242498, 6.081864, -0.057255};
+ matrix_->SolveLowerTriangularInPlace(rhs_and_solution);
+ for (int i = 0; i < 4; ++i) {
+ EXPECT_NEAR(rhs_and_solution[i], expected[i], 1e-4) << i;
+ }
+}
+
+TEST_F(SolveLowerTriangularTest, TransposeSolveInPlace) {
+ double rhs_and_solution[] = {1.0, 1.0, 2.0, 2.0};
+ const double expected[] = { -1.4706, -1.0962, 6.6667, 2.2477};
+
+ matrix_->SolveLowerTriangularTransposeInPlace(rhs_and_solution);
+ for (int i = 0; i < 4; ++i) {
+ EXPECT_NEAR(rhs_and_solution[i], expected[i], 1e-4) << i;
+ }
+}
+
+TEST(CompressedRowSparseMatrix, Transpose) {
+ // 0 1 0 2 3 0
+ // 4 6 7 0 0 8
+ // 9 10 0 11 12 0
+ // 13 0 14 15 9 0
+ // 0 16 17 0 0 0
+
+ CompressedRowSparseMatrix matrix(5, 6, 30);
+ int* rows = matrix.mutable_rows();
+ int* cols = matrix.mutable_cols();
+ double* values = matrix.mutable_values();
+
+ rows[0] = 0;
+ cols[0] = 1;
+ cols[1] = 3;
+ cols[2] = 4;
+
+ rows[1] = 3;
+ cols[3] = 0;
+ cols[4] = 1;
+ cols[5] = 2;
+ cols[6] = 5;
+
+
+ rows[2] = 7;
+ cols[7] = 0;
+ cols[8] = 1;
+ cols[9] = 3;
+ cols[10] = 4;
+
+ rows[3] = 11;
+ cols[11] = 0;
+ cols[12] = 2;
+ cols[13] = 3;
+ cols[14] = 4;
+
+ rows[4] = 15;
+ cols[15] = 1;
+ cols[16] = 2;
+ rows[5] = 17;
+
+ copy(cols, cols + 17, values);
+
+ scoped_ptr<CompressedRowSparseMatrix> transpose(matrix.Transpose());
+
+ Matrix dense_matrix;
+ matrix.ToDenseMatrix(&dense_matrix);
+
+ Matrix dense_transpose;
+ transpose->ToDenseMatrix(&dense_transpose);
+ EXPECT_NEAR((dense_matrix - dense_transpose.transpose()).norm(), 0.0, 1e-14);
+}
+
} // namespace internal
} // namespace ceres
diff --git a/internal/ceres/incomplete_lq_factorization.cc b/internal/ceres/incomplete_lq_factorization.cc
new file mode 100644
index 0000000..0d51f9a
--- /dev/null
+++ b/internal/ceres/incomplete_lq_factorization.cc
@@ -0,0 +1,239 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2013 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+
+#include "ceres/incomplete_lq_factorization.h"
+
+#include <vector>
+#include <utility>
+#include <cmath>
+#include "ceres/compressed_row_sparse_matrix.h"
+#include "ceres/internal/eigen.h"
+#include "ceres/internal/port.h"
+#include "glog/logging.h"
+
+namespace ceres {
+namespace internal {
+
+// Normalize a row and return its norm.
+inline double NormalizeRow(const int row, CompressedRowSparseMatrix* matrix) {
+ const int row_begin = matrix->rows()[row];
+ const int row_end = matrix->rows()[row + 1];
+
+ double* values = matrix->mutable_values();
+ double norm = 0.0;
+ for (int i = row_begin; i < row_end; ++i) {
+ norm += values[i] * values[i];
+ }
+
+ norm = sqrt(norm);
+ const double inverse_norm = 1.0 / norm;
+ for (int i = row_begin; i < row_end; ++i) {
+ values[i] *= inverse_norm;
+ }
+
+ return norm;
+}
+
+// Compute a(row_a,:) * b(row_b, :)'
+inline double RowDotProduct(const CompressedRowSparseMatrix& a,
+ const int row_a,
+ const CompressedRowSparseMatrix& b,
+ const int row_b) {
+ const int* a_rows = a.rows();
+ const int* a_cols = a.cols();
+ const double* a_values = a.values();
+
+ const int* b_rows = b.rows();
+ const int* b_cols = b.cols();
+ const double* b_values = b.values();
+
+ const int row_a_end = a_rows[row_a + 1];
+ const int row_b_end = b_rows[row_b + 1];
+
+ int idx_a = a_rows[row_a];
+ int idx_b = b_rows[row_b];
+ double dot_product = 0.0;
+ while (idx_a < row_a_end && idx_b < row_b_end) {
+ if (a_cols[idx_a] == b_cols[idx_b]) {
+ dot_product += a_values[idx_a++] * b_values[idx_b++];
+ }
+
+ while (a_cols[idx_a] < b_cols[idx_b] && idx_a < row_a_end) {
+ ++idx_a;
+ }
+
+ while (a_cols[idx_a] > b_cols[idx_b] && idx_b < row_b_end) {
+ ++idx_b;
+ }
+ }
+
+ return dot_product;
+}
+
+struct SecondGreaterThan {
+ public:
+ bool operator()(const pair<int, double>& lhs,
+ const pair<int, double>& rhs) const {
+ return (fabs(lhs.second) > fabs(rhs.second));
+ }
+};
+
+// In the row vector dense_row(0:num_cols), drop values smaller than
+// the max_value * drop_tolerance. Of the remaining non-zero values,
+// choose at most level_of_fill values and then add the resulting row
+// vector to matrix.
+
+void DropEntriesAndAddRow(const Vector& dense_row,
+ const int num_entries,
+ const int level_of_fill,
+ const double drop_tolerance,
+ vector<pair<int, double> >* scratch,
+ CompressedRowSparseMatrix* matrix) {
+ int* rows = matrix->mutable_rows();
+ int* cols = matrix->mutable_cols();
+ double* values = matrix->mutable_values();
+ int num_nonzeros = rows[matrix->num_rows()];
+
+ if (num_entries == 0) {
+ matrix->set_num_rows(matrix->num_rows() + 1);
+ rows[matrix->num_rows()] = num_nonzeros;
+ return;
+ }
+
+ const double max_value = dense_row.head(num_entries).cwiseAbs().maxCoeff();
+ const double threshold = drop_tolerance * max_value;
+
+ int scratch_count = 0;
+ for (int i = 0; i < num_entries; ++i) {
+ if (fabs(dense_row[i]) > threshold) {
+ pair<int, double>& entry = (*scratch)[scratch_count];
+ entry.first = i;
+ entry.second = dense_row[i];
+ ++scratch_count;
+ }
+ }
+
+ if (scratch_count > level_of_fill) {
+ nth_element(scratch->begin(),
+ scratch->begin() + level_of_fill,
+ scratch->begin() + scratch_count,
+ SecondGreaterThan());
+ scratch_count = level_of_fill;
+ sort(scratch->begin(), scratch->begin() + scratch_count);
+ }
+
+ for (int i = 0; i < scratch_count; ++i) {
+ const pair<int, double>& entry = (*scratch)[i];
+ cols[num_nonzeros] = entry.first;
+ values[num_nonzeros] = entry.second;
+ ++num_nonzeros;
+ }
+
+ matrix->set_num_rows(matrix->num_rows() + 1);
+ rows[matrix->num_rows()] = num_nonzeros;
+}
+
+// Saad's Incomplete LQ factorization algorithm.
+CompressedRowSparseMatrix* IncompleteLQFactorization(
+ const CompressedRowSparseMatrix& matrix,
+ const int l_level_of_fill,
+ const double l_drop_tolerance,
+ const int q_level_of_fill,
+ const double q_drop_tolerance) {
+ const int num_rows = matrix.num_rows();
+ const int num_cols = matrix.num_cols();
+ const int* rows = matrix.rows();
+ const int* cols = matrix.cols();
+ const double* values = matrix.values();
+
+ CompressedRowSparseMatrix* l =
+ new CompressedRowSparseMatrix(num_rows,
+ num_rows,
+ l_level_of_fill * num_rows);
+ l->set_num_rows(0);
+
+ CompressedRowSparseMatrix q(num_rows, num_cols, q_level_of_fill * num_rows);
+ q.set_num_rows(0);
+
+ int* l_rows = l->mutable_rows();
+ int* l_cols = l->mutable_cols();
+ double* l_values = l->mutable_values();
+
+ int* q_rows = q.mutable_rows();
+ int* q_cols = q.mutable_cols();
+ double* q_values = q.mutable_values();
+
+ Vector l_i(num_rows);
+ Vector q_i(num_cols);
+ vector<pair<int, double> > scratch(num_cols);
+ for (int i = 0; i < num_rows; ++i) {
+ // l_i = q * matrix(i,:)'
+ l_i.setZero();
+ for (int j = 0; j < i; ++j) {
+ l_i(j) = RowDotProduct(matrix, i, q, j);
+ }
+ DropEntriesAndAddRow(l_i,
+ i,
+ l_level_of_fill,
+ l_drop_tolerance,
+ &scratch,
+ l);
+
+ // q_i = matrix(i,:) - q(0:i-1,:) * l_i
+ q_i.setZero();
+ for (int idx = rows[i]; idx < rows[i + 1]; ++idx) {
+ q_i(cols[idx]) = values[idx];
+ }
+
+ for (int j = l_rows[i]; j < l_rows[i + 1]; ++j) {
+ const int r = l_cols[j];
+ const double lij = l_values[j];
+ for (int idx = q_rows[r]; idx < q_rows[r + 1]; ++idx) {
+ q_i(q_cols[idx]) -= lij * q_values[idx];
+ }
+ }
+ DropEntriesAndAddRow(q_i,
+ num_cols,
+ q_level_of_fill,
+ q_drop_tolerance,
+ &scratch,
+ &q);
+
+ // lii = |qi|
+ l_cols[l->num_nonzeros()] = i;
+ l_values[l->num_nonzeros()] = NormalizeRow(i, &q);
+ l_rows[l->num_rows()] += 1;
+ }
+
+ return l;
+}
+
+} // namespace internal
+} // namespace ceres
diff --git a/internal/ceres/incomplete_lq_factorization.h b/internal/ceres/incomplete_lq_factorization.h
new file mode 100644
index 0000000..4f3da3c
--- /dev/null
+++ b/internal/ceres/incomplete_lq_factorization.h
@@ -0,0 +1,83 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2013 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+
+#include "ceres/compressed_row_sparse_matrix.h"
+
+namespace ceres {
+namespace internal {
+
+// Incomplete LQ factorization as described in
+//
+// Preconditioning techniques for indefinite and nonsymmetric linear
+// systems. Yousef Saad, Preprint RIACS-ILQ-TR, RIACS, NASA Ames
+// Research Center, Moffett Field, CA, 1987.
+//
+// An incomplete LQ factorization of a matrix A is a decomposition
+//
+// A = LQ + E
+//
+// where L is a lower triangular matrix and Q is a near orthonormal
+// matrix. The extent of orthonormality depends on E, the "drop"
+// matrix. Each row of L has at most l_level_of_fill entries, and every
+// retained entry is larger in magnitude than l_drop_tolerance times
+// the largest entry in that row. Each row of Q has at most
+// q_level_of_fill entries, and every retained entry is larger in
+// magnitude than q_drop_tolerance times the largest entry in that row.
+//
+// E is the error of the incomplete factorization.
+//
+// The purpose of an incomplete factorization is preconditioning, and
+// preconditioning only needs the L factor, so this function returns
+// just L.
+//
+// Caller owns the result.
+CompressedRowSparseMatrix* IncompleteLQFactorization(
+ const CompressedRowSparseMatrix& matrix,
+ const int l_level_of_fill,
+ const double l_drop_tolerance,
+ const int q_level_of_fill,
+ const double q_drop_tolerance);
+
+// In the row vector dense_row(0:num_cols), drop values smaller than
+// the max_value * drop_tolerance. Of the remaining non-zero values,
+// choose at most level_of_fill values and then add the resulting row
+// vector to matrix.
+//
+// scratch is used to prevent allocations inside this function. It is
+// assumed that scratch is of size matrix->num_cols().
+void DropEntriesAndAddRow(const Vector& dense_row,
+ const int num_entries,
+ const int level_of_fill,
+ const double drop_tolerance,
+ vector<pair<int, double> >* scratch,
+ CompressedRowSparseMatrix* matrix);
+
+} // namespace internal
+} // namespace ceres
diff --git a/internal/ceres/incomplete_lq_factorization_test.cc b/internal/ceres/incomplete_lq_factorization_test.cc
new file mode 100644
index 0000000..0a43f02
--- /dev/null
+++ b/internal/ceres/incomplete_lq_factorization_test.cc
@@ -0,0 +1,196 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2013 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+
+#include "ceres/incomplete_lq_factorization.h"
+
+#include "Eigen/Dense"
+#include "ceres/compressed_row_sparse_matrix.h"
+#include "ceres/internal/scoped_ptr.h"
+#include "glog/logging.h"
+#include "gtest/gtest.h"
+
+namespace ceres {
+namespace internal {
+
+void ExpectMatricesAreEqual(const CompressedRowSparseMatrix& expected,
+ const CompressedRowSparseMatrix& actual,
+ const double tolerance) {
+ EXPECT_EQ(expected.num_rows(), actual.num_rows());
+ EXPECT_EQ(expected.num_cols(), actual.num_cols());
+ for (int i = 0; i < expected.num_rows(); ++i) {
+ EXPECT_EQ(expected.rows()[i], actual.rows()[i]);
+ }
+
+ for (int i = 0; i < actual.num_nonzeros(); ++i) {
+ EXPECT_EQ(expected.cols()[i], actual.cols()[i]);
+ EXPECT_NEAR(expected.values()[i], actual.values()[i], tolerance);
+ }
+}
+
+TEST(IncompleteLQFactorization, OneByOneMatrix) {
+ CompressedRowSparseMatrix matrix(1, 1, 1);
+ matrix.mutable_rows()[0] = 0;
+ matrix.mutable_rows()[1] = 1;
+ matrix.mutable_cols()[0] = 0;
+ matrix.mutable_values()[0] = 2;
+
+ scoped_ptr<CompressedRowSparseMatrix> l(
+ IncompleteLQFactorization(matrix, 1, 0.0, 1, 0.0));
+ ExpectMatricesAreEqual(matrix, *l, 1e-16);
+}
+
+TEST(IncompleteLQFactorization, CompleteFactorization) {
+ double dense_matrix[] = {
+ 0.00000, 0.00000, 0.20522, 0.00000, 0.49077, 0.92835, 0.00000, 0.83825, 0.00000, 0.00000, // NOLINT
+ 0.00000, 0.00000, 0.00000, 0.62491, 0.38144, 0.00000, 0.79394, 0.79178, 0.00000, 0.44382, // NOLINT
+ 0.00000, 0.00000, 0.00000, 0.61517, 0.55941, 0.00000, 0.00000, 0.00000, 0.00000, 0.60664, // NOLINT
+ 0.00000, 0.00000, 0.00000, 0.00000, 0.45031, 0.00000, 0.64132, 0.00000, 0.38832, 0.00000, // NOLINT
+ 0.00000, 0.00000, 0.00000, 0.00000, 0.00000, 0.57121, 0.00000, 0.01375, 0.70640, 0.00000, // NOLINT
+ 0.00000, 0.00000, 0.07451, 0.00000, 0.00000, 0.00000, 0.00000, 0.00000, 0.00000, 0.00000, // NOLINT
+ 0.68095, 0.00000, 0.00000, 0.95473, 0.00000, 0.00000, 0.00000, 0.00000, 0.00000, 0.00000, // NOLINT
+ 0.00000, 0.00000, 0.00000, 0.00000, 0.59374, 0.00000, 0.00000, 0.00000, 0.49139, 0.00000, // NOLINT
+ 0.91276, 0.96641, 0.00000, 0.00000, 0.00000, 0.00000, 0.00000, 0.00000, 0.00000, 0.91797, // NOLINT
+ 0.96828, 0.00000, 0.00000, 0.72583, 0.00000, 0.00000, 0.81459, 0.00000, 0.04560, 0.00000 // NOLINT
+ };
+
+ CompressedRowSparseMatrix matrix(10, 10, 100);
+ int* rows = matrix.mutable_rows();
+ int* cols = matrix.mutable_cols();
+ double* values = matrix.mutable_values();
+
+ int idx = 0;
+ for (int i = 0; i < 10; ++i) {
+ rows[i] = idx;
+ for (int j = 0; j < 10; ++j) {
+ const double v = dense_matrix[i * 10 + j];
+ if (fabs(v) > 1e-6) {
+ cols[idx] = j;
+ values[idx] = v;
+ ++idx;
+ }
+ }
+ }
+ rows[10] = idx;
+
+ scoped_ptr<CompressedRowSparseMatrix> lmatrix(
+ IncompleteLQFactorization(matrix, 10, 0.0, 10, 0.0));
+
+ ConstMatrixRef mref(dense_matrix, 10, 10);
+
+ // Use Cholesky factorization to compute the L matrix.
+ Matrix expected_l_matrix = (mref * mref.transpose()).llt().matrixL();
+ Matrix actual_l_matrix;
+ lmatrix->ToDenseMatrix(&actual_l_matrix);
+
+ EXPECT_NEAR((expected_l_matrix * expected_l_matrix.transpose() -
+ actual_l_matrix * actual_l_matrix.transpose()).norm(), 0.0, 1e-10)
+ << "expected: \n" << expected_l_matrix
+ << "\nactual: \n" << actual_l_matrix;
+}
+
+TEST(IncompleteLQFactorization, DropEntriesAndAddRow) {
+ // Allocate space and then make it a zero sized matrix.
+ CompressedRowSparseMatrix matrix(10, 10, 100);
+ matrix.set_num_rows(0);
+
+ vector<pair<int, double> > scratch(10);
+
+ Vector dense_vector(10);
+ dense_vector(0) = 5;
+ dense_vector(1) = 1;
+ dense_vector(2) = 2;
+ dense_vector(3) = 3;
+ dense_vector(4) = 1;
+ dense_vector(5) = 4;
+
+ // Add a row with just one entry.
+ DropEntriesAndAddRow(dense_vector, 1, 1, 0, &scratch, &matrix);
+ EXPECT_EQ(matrix.num_rows(), 1);
+ EXPECT_EQ(matrix.num_cols(), 10);
+ EXPECT_EQ(matrix.num_nonzeros(), 1);
+ EXPECT_EQ(matrix.values()[0], 5.0);
+ EXPECT_EQ(matrix.cols()[0], 0);
+
+ // Add a row with six entries
+ DropEntriesAndAddRow(dense_vector, 6, 10, 0, &scratch, &matrix);
+ EXPECT_EQ(matrix.num_rows(), 2);
+ EXPECT_EQ(matrix.num_cols(), 10);
+ EXPECT_EQ(matrix.num_nonzeros(), 7);
+ for (int idx = matrix.rows()[1]; idx < matrix.rows()[2]; ++idx) {
+ EXPECT_EQ(matrix.cols()[idx], idx - matrix.rows()[1]);
+ EXPECT_EQ(matrix.values()[idx], dense_vector(idx - matrix.rows()[1]));
+ }
+
+ // Add the top 3 entries.
+ DropEntriesAndAddRow(dense_vector, 6, 3, 0, &scratch, &matrix);
+ EXPECT_EQ(matrix.num_rows(), 3);
+ EXPECT_EQ(matrix.num_cols(), 10);
+ EXPECT_EQ(matrix.num_nonzeros(), 10);
+
+ EXPECT_EQ(matrix.cols()[matrix.rows()[2]], 0);
+ EXPECT_EQ(matrix.cols()[matrix.rows()[2] + 1], 3);
+ EXPECT_EQ(matrix.cols()[matrix.rows()[2] + 2], 5);
+
+ EXPECT_EQ(matrix.values()[matrix.rows()[2]], 5);
+ EXPECT_EQ(matrix.values()[matrix.rows()[2] + 1], 3);
+ EXPECT_EQ(matrix.values()[matrix.rows()[2] + 2], 4);
+
+ // Only keep entries greater than 1.0.
+ DropEntriesAndAddRow(dense_vector, 6, 6, 0.2, &scratch, &matrix);
+ EXPECT_EQ(matrix.num_rows(), 4);
+ EXPECT_EQ(matrix.num_cols(), 10);
+ EXPECT_EQ(matrix.num_nonzeros(), 14);
+
+ EXPECT_EQ(matrix.cols()[matrix.rows()[3]], 0);
+ EXPECT_EQ(matrix.cols()[matrix.rows()[3] + 1], 2);
+ EXPECT_EQ(matrix.cols()[matrix.rows()[3] + 2], 3);
+ EXPECT_EQ(matrix.cols()[matrix.rows()[3] + 3], 5);
+
+ EXPECT_EQ(matrix.values()[matrix.rows()[3]], 5);
+ EXPECT_EQ(matrix.values()[matrix.rows()[3] + 1], 2);
+ EXPECT_EQ(matrix.values()[matrix.rows()[3] + 2], 3);
+ EXPECT_EQ(matrix.values()[matrix.rows()[3] + 3], 4);
+
+ // Only keep the top 2 entries greater than 1.0
+ DropEntriesAndAddRow(dense_vector, 6, 2, 0.2, &scratch, &matrix);
+ EXPECT_EQ(matrix.num_rows(), 5);
+ EXPECT_EQ(matrix.num_cols(), 10);
+ EXPECT_EQ(matrix.num_nonzeros(), 16);
+
+ EXPECT_EQ(matrix.cols()[matrix.rows()[4]], 0);
+ EXPECT_EQ(matrix.cols()[matrix.rows()[4] + 1], 5);
+
+ EXPECT_EQ(matrix.values()[matrix.rows()[4]], 5);
+ EXPECT_EQ(matrix.values()[matrix.rows()[4] + 1], 4);
+}
+
+
+} // namespace internal
+} // namespace ceres
diff --git a/internal/ceres/linear_least_squares_problems.cc b/internal/ceres/linear_least_squares_problems.cc
index d1ee7f0..df6d03f 100644
--- a/internal/ceres/linear_least_squares_problems.cc
+++ b/internal/ceres/linear_least_squares_problems.cc
@@ -36,7 +36,6 @@
#include "ceres/block_sparse_matrix.h"
#include "ceres/block_structure.h"
#include "ceres/casts.h"
-#include "ceres/compressed_row_sparse_matrix.h"
#include "ceres/file.h"
#include "ceres/internal/scoped_ptr.h"
#include "ceres/matrix_proto.h"
@@ -82,7 +81,7 @@
} else if (A.has_triplet_matrix()) {
problem->A.reset(new TripletSparseMatrix(A));
} else {
- problem->A.reset(new CompressedRowSparseMatrix(A));
+ LOG(FATAL) << "Broken.";
}
if (problem_proto.b_size() > 0) {