Optimize the computation of the LM diagonal in TinySolver

This eliminates an entire temporary vector, as well as a square root
followed by a squaring for every diagonal entry.

Thanks to @rlabbe for pointing this out.

Change-Id: I0de117b31b9332c61e687f18466d7cb2e2ac611e
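For reference, a minimal standalone sketch of the identity behind this
change (illustrative only, not part of the patch; the values of u and the
diagonal entry below are made up): the old code computed sqrt(u * d) for
the clamped diagonal entry d and then squared it when regularizing, which
is just u * d again.

    // Illustrative check that squaring sqrt(u * d) gives back u * d.
    #include <algorithm>
    #include <cassert>
    #include <cmath>

    int main() {
      const double u = 1e-3;        // hypothetical LM damping parameter
      const double jtj_ii = 42.0;   // hypothetical diagonal entry of J^T J
      const double min_diagonal = 1e-6;
      const double max_diagonal = 1e32;

      // Clamp the diagonal entry, as the solver does.
      const double d = std::min(std::max(jtj_ii, min_diagonal), max_diagonal);

      // Old path: take a square root, then square it when regularizing.
      const double lm = std::sqrt(u * d);
      const double old_term = lm * lm;

      // New path: add the scaled, clamped diagonal entry directly.
      const double new_term = u * d;

      assert(std::abs(old_term - new_term) <= 1e-12 * new_term);
      return 0;
    }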
diff --git a/include/ceres/tiny_solver.h b/include/ceres/tiny_solver.h
index 678c28f..9242cd0 100644
--- a/include/ceres/tiny_solver.h
+++ b/include/ceres/tiny_solver.h
@@ -248,10 +248,9 @@
       jtj_regularized_ = jtj_;
       const Scalar min_diagonal = 1e-6;
       const Scalar max_diagonal = 1e32;
-      for (int i = 0; i < lm_diagonal_.rows(); ++i) {
-        lm_diagonal_[i] = std::sqrt(
-            u * (std::min)((std::max)(jtj_(i, i), min_diagonal), max_diagonal));
-        jtj_regularized_(i, i) += lm_diagonal_[i] * lm_diagonal_[i];
+      for (int i = 0; i < dx_.rows(); ++i) {
+        jtj_regularized_(i, i) +=
+            u * (std::min)((std::max)(jtj_(i, i), min_diagonal), max_diagonal);
       }
 
       // TODO(sameeragarwal): Check for failure and deal with it.
@@ -338,7 +337,7 @@
   // linear system. This allows reusing the intermediate storage across solves.
   LinearSolver linear_solver_;
   Scalar cost_;
-  Parameters dx_, x_new_, g_, jacobi_scaling_, lm_diagonal_, lm_step_;
+  Parameters dx_, x_new_, g_, jacobi_scaling_, lm_step_;
   Eigen::Matrix<Scalar, NUM_RESIDUALS, 1> residuals_, f_x_new_;
   Eigen::Matrix<Scalar, NUM_RESIDUALS, NUM_PARAMETERS> jacobian_;
   Eigen::Matrix<Scalar, NUM_PARAMETERS, NUM_PARAMETERS> jtj_, jtj_regularized_;
@@ -385,7 +384,6 @@
     x_new_.resize(num_parameters);
     g_.resize(num_parameters);
     jacobi_scaling_.resize(num_parameters);
-    lm_diagonal_.resize(num_parameters);
     lm_step_.resize(num_parameters);
     residuals_.resize(num_residuals);
     f_x_new_.resize(num_residuals);