// Ceres Solver - A fast non-linear least squares minimizer
// Copyright 2012 Google Inc. All rights reserved.
// http://code.google.com/p/ceres-solver/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
//   this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
//   this list of conditions and the following disclaimer in the documentation
//   and/or other materials provided with the distribution.
// * Neither the name of Google Inc. nor the names of its contributors may be
//   used to endorse or promote products derived from this software without
//   specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//
// Author: sameeragarwal@google.com (Sameer Agarwal)

#include <list>

#include "ceres/internal/eigen.h"
#include "ceres/low_rank_inverse_hessian.h"
#include "glog/logging.h"

namespace ceres {
namespace internal {

// The (L)BFGS algorithm explicitly requires that the secant equation:
//
//   B_{k+1} * s_k = y_k
//
// is satisfied at each iteration, where B_{k+1} is the approximated
// Hessian at the (k+1)-th iteration, s_k = (x_{k+1} - x_k) and
// y_k = (grad_{k+1} - grad_k). As the approximated Hessian must be
// positive definite, this is equivalent to the condition:
//
//   s_k^T * y_k > 0    [s_k^T * B_{k+1} * s_k = s_k^T * y_k > 0]
//
// This condition would always be satisfied if the function were strictly
// convex; alternatively, it is always satisfied provided that a Wolfe line
// search is used (even if the function is not strictly convex). See [1]
// (p138) for a proof.
//
// Although Ceres will always use a Wolfe line search when using (L)BFGS,
// practical implementation considerations mean that the line search may
// return a point that satisfies only the Armijo condition, and thus could
// violate the secant equation. As such, we will only use a step to update
// the Hessian approximation if:
//
//   s_k^T * y_k > tolerance
//
// It is important that the tolerance is very small (and >= 0), as otherwise
// we might skip the update too often and fail to capture important curvature
// information in the Hessian. For example, reducing the tolerance from 1e-10
// to 1e-14 improves the NIST benchmark score from 43/54 to 53/54.
//
// [1] Nocedal J., Wright S., Numerical Optimization, 2nd Ed., Springer, 2006.
//
// TODO(alexs.mac): Consider using Damped BFGS update instead of
// skipping update.
const double kLBFGSSecantConditionHessianUpdateTolerance = 1e-14;

LowRankInverseHessian::LowRankInverseHessian(
    int num_parameters,
    int max_num_corrections,
    bool use_approximate_eigenvalue_scaling)
    : num_parameters_(num_parameters),
      max_num_corrections_(max_num_corrections),
      use_approximate_eigenvalue_scaling_(use_approximate_eigenvalue_scaling),
      approximate_eigenvalue_scale_(1.0),
      delta_x_history_(num_parameters, max_num_corrections),
      delta_gradient_history_(num_parameters, max_num_corrections),
      delta_x_dot_delta_gradient_(max_num_corrections) {
}

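// Stores the most recent correction pair {s_k = delta_x, y_k =
// delta_gradient} in the history, but only if it satisfies the secant
// condition described above; otherwise the update is skipped and false
// is returned.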
bool LowRankInverseHessian::Update(const Vector& delta_x,
                                   const Vector& delta_gradient) {
  const double delta_x_dot_delta_gradient = delta_x.dot(delta_gradient);
  if (delta_x_dot_delta_gradient <=
      kLBFGSSecantConditionHessianUpdateTolerance) {
    VLOG(2) << "Skipping L-BFGS Update, delta_x_dot_delta_gradient too "
            << "small: " << delta_x_dot_delta_gradient << ", tolerance: "
            << kLBFGSSecantConditionHessianUpdateTolerance
            << " (Secant condition).";
    return false;
  }

  int next = indices_.size();
  // Once the size of the list reaches max_num_corrections_, simulate
  // a circular buffer by removing the first element of the list and
  // making it the next position where the L-BFGS history is stored.
  if (next == max_num_corrections_) {
    next = indices_.front();
    indices_.pop_front();
  }

  indices_.push_back(next);
  delta_x_history_.col(next) = delta_x;
  delta_gradient_history_.col(next) = delta_gradient;
  delta_x_dot_delta_gradient_(next) = delta_x_dot_delta_gradient;
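  // gamma_k = (s_k' * y_k) / (y_k' * y_k) from the most recent correction
  // pair; used to scale the initial inverse Hessian approximation H_0 when
  // use_approximate_eigenvalue_scaling_ is true (see RightMultiply() below).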
  approximate_eigenvalue_scale_ =
      delta_x_dot_delta_gradient / delta_gradient.squaredNorm();
  return true;
}

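// Computes search_direction = H_k * gradient, where H_k is the current
// L-BFGS approximation to the inverse Hessian, without ever forming H_k
// explicitly: the stored correction pairs {s_i, y_i} are applied via the
// standard L-BFGS two-loop recursion (described, e.g., in [2] below).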
void LowRankInverseHessian::RightMultiply(const double* x_ptr,
                                          double* y_ptr) const {
  ConstVectorRef gradient(x_ptr, num_parameters_);
  VectorRef search_direction(y_ptr, num_parameters_);

  search_direction = gradient;

  const int num_corrections = indices_.size();
  Vector alpha(num_corrections);

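  // First loop of the two-loop recursion: traverse the correction history
  // from the most recent pair to the oldest, computing
  //
  //   alpha_i = (s_i' * q) / (s_i' * y_i),  q <- q - alpha_i * y_i,
  //
  // where q is the search direction being transformed in place.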
  for (std::list<int>::const_reverse_iterator it = indices_.rbegin();
       it != indices_.rend();
       ++it) {
    const double alpha_i = delta_x_history_.col(*it).dot(search_direction) /
        delta_x_dot_delta_gradient_(*it);
    search_direction -= alpha_i * delta_gradient_history_.col(*it);
    alpha(*it) = alpha_i;
  }

  if (use_approximate_eigenvalue_scaling_) {
    // Rescale the initial inverse Hessian approximation (H_0) to be
    // iteratively updated so that it is of a similar 'size' to the true
    // inverse Hessian along the most recent search direction. As shown
    // in [1]:
    //
    //   \gamma_k = (delta_gradient_{k-1}' * delta_x_{k-1}) /
    //              (delta_gradient_{k-1}' * delta_gradient_{k-1})
    //
    // satisfies:
    //
    //   (1 / \lambda_m) <= \gamma_k <= (1 / \lambda_1)
    //
    // where \lambda_1 & \lambda_m are the smallest and largest eigenvalues
    // of the true Hessian (not the inverse) along the most recent search
    // direction respectively. Thus \gamma is an approximate eigenvalue of
    // the true inverse Hessian, and choosing H_0 = I * \gamma will yield a
    // starting point that has a similar scale to the true inverse Hessian.
    // This technique is widely reported to often improve convergence;
    // however, this is not universally true, particularly if there are
    // errors in the initial Jacobians, or if there are significant
    // differences in the sensitivity of the problem to the parameters
    // (i.e. the range of the magnitudes of the components of the gradient
    // is large).
    //
    // The origin of this rescaling trick is somewhat unclear; the earliest
    // reference appears to be Oren [1], however it is widely discussed
    // without specific attribution in various texts, including [2]
    // (p143/178).
    //
    // [1] Oren S.S., Self-scaling variable metric (SSVM) algorithms
    //     Part II: Implementation and experiments, Management Science,
    //     20(5), 863-874, 1974.
    // [2] Nocedal J., Wright S., Numerical Optimization, Springer, 1999.
    search_direction *= approximate_eigenvalue_scale_;

    VLOG(4) << "Applying approximate_eigenvalue_scale: "
            << approximate_eigenvalue_scale_ << " to initial inverse Hessian "
            << "approximation.";
  }

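  // Second loop of the two-loop recursion: traverse the history from the
  // oldest pair to the most recent, computing
  //
  //   beta = (y_i' * r) / (s_i' * y_i),  r <- r + s_i * (alpha_i - beta),
  //
  // where r is the search direction, now scaled by H_0.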
  for (std::list<int>::const_iterator it = indices_.begin();
       it != indices_.end();
       ++it) {
    const double beta = delta_gradient_history_.col(*it).dot(search_direction) /
        delta_x_dot_delta_gradient_(*it);
    search_direction += delta_x_history_.col(*it) * (alpha(*it) - beta);
  }
}

}  // namespace internal
}  // namespace ceres