Improve compatibility with ceres::Solver

1. The default linear solver is now Eigen::LDLT instead of
   Eigen::PartialPivLU.
2. Options::max_iterations -> Options::max_num_iterations
3. Options::error_threshold -> Options::cost_threshold
4. Options::relative_step_threshold -> Options::parameter_tolerance
5. Options::gradient_threshold -> Options::gradient_tolerance
6. Options::initial_scale_factor -> Options::initial_trust_region_radius
7. The default values of the above parameters have been changed
   to match those in ceres::Solver::Options (a usage sketch with
   the new names follows this list).
8. Status::RUNNING has been removed.
9. Update now returns a bool instead of a Status enum; the
   convergence and status handling has been moved into the main
   solver loop (a sketch of the new Function contract appears
   just before the diff).
10. Summary::gradient_magnitude has been renamed to
    Summary::gradient_max_norm to match the convergence test.
11. A member variable cost_ has been added, which is computed and
    cached by Update.
12. The parameter_tolerance based convergence test has been made
    more robust near zero: a step is accepted as converged when
    ||dx|| < parameter_tolerance * (||x|| + parameter_tolerance),
    which remains meaningful even when ||x|| is zero.
13. Use of double has been replaced by Scalar.
14. Minor clang-format cleanups.
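
For reference, a minimal usage sketch with the renamed options. The
functor MyCostFunctor is hypothetical (it is sketched just before the
diff); option names and default values are taken from the diff below:

    MyCostFunctor my_functor;
    ceres::TinySolver<MyCostFunctor> solver;  // Eigen::LDLT is the default.
    solver.options.max_num_iterations = 50;            // was max_iterations
    solver.options.gradient_tolerance = 1e-10;         // was gradient_threshold
    solver.options.parameter_tolerance = 1e-8;         // was relative_step_threshold
    solver.options.cost_threshold =                    // was error_threshold
        std::numeric_limits<double>::epsilon();
    solver.options.initial_trust_region_radius = 1e4;  // was initial_scale_factor

    Eigen::Matrix<double, MyCostFunctor::NUM_PARAMETERS, 1> x;
    x(0) = 0.5;
    solver.Solve(my_functor, &x);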

Change-Id: I3cb0e2fd0a0204476bb8718761dc740cdf5e42ce
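
Below is a sketch of the Function contract the solver now expects;
MyCostFunctor is a hypothetical example, not part of this change.
operator() returns bool (false signals an evaluation failure, see
item 9), and the jacobian pointer may be NULL when only residuals
are needed:

    struct MyCostFunctor {
      typedef double Scalar;
      enum { NUM_RESIDUALS = 2, NUM_PARAMETERS = 1 };
      // Residuals of min 1/2 ((x - 3)^2 + 4 (x - 1)^2).
      bool operator()(const double* parameters,
                      double* residuals,
                      double* jacobian) const {
        const double x = parameters[0];
        residuals[0] = x - 3.0;
        residuals[1] = 2.0 * (x - 1.0);
        if (jacobian != NULL) {
          // Column-major NUM_RESIDUALS x NUM_PARAMETERS layout:
          // jacobian[r + c * NUM_RESIDUALS] = d residual_r / d parameter_c.
          jacobian[0] = 1.0;  // d residuals[0] / dx
          jacobian[1] = 2.0;  // d residuals[1] / dx
        }
        return true;
      }
    };
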
diff --git a/examples/nist.cc b/examples/nist.cc
index 7968104..5bd1003 100644
--- a/examples/nist.cc
+++ b/examples/nist.cc
@@ -589,17 +589,17 @@
     } else {
       ceres::TinySolverCostFunctionAdapter<Eigen::Dynamic, num_parameters> cfa(
           *cost_function);
-      typedef
-      ceres::TinySolver<
-        ceres::TinySolverCostFunctionAdapter<Eigen::Dynamic, num_parameters>,
-        Eigen::LDLT<Eigen::Matrix<double, num_parameters, num_parameters> > > Solver;
+      typedef ceres::TinySolver<
+          ceres::TinySolverCostFunctionAdapter<Eigen::Dynamic, num_parameters> >
+          Solver;
       Solver solver;
-      solver.options.max_iterations = FLAGS_num_iterations;
-      solver.options.error_threshold = std::numeric_limits<double>::epsilon();
-      solver.options.gradient_threshold = std::numeric_limits<double>::epsilon();
-      solver.options.relative_step_threshold = std::numeric_limits<double>::epsilon();
+      solver.options.max_num_iterations = FLAGS_num_iterations;
+      solver.options.gradient_tolerance =
+          std::numeric_limits<double>::epsilon();
+      solver.options.parameter_tolerance =
+          std::numeric_limits<double>::epsilon();
 
-      Eigen::Matrix<double, num_parameters,1> x;
+      Eigen::Matrix<double, num_parameters, 1> x;
       x = initial_parameters.transpose();
       typename Solver::Summary summary = solver.Solve(cfa, &x);
       initial_parameters = x;
diff --git a/include/ceres/tiny_solver.h b/include/ceres/tiny_solver.h
index cbdc228..c55375e 100644
--- a/include/ceres/tiny_solver.h
+++ b/include/ceres/tiny_solver.h
@@ -125,7 +125,7 @@
 //   int NumParameters() const;
 //
 template<typename Function,
-         typename LinearSolver = Eigen::PartialPivLU<
+         typename LinearSolver = Eigen::LDLT<
            Eigen::Matrix<typename Function::Scalar,
                          Function::NUM_PARAMETERS,
                          Function::NUM_PARAMETERS> > >
@@ -139,56 +139,48 @@
   typedef typename Eigen::Matrix<Scalar, NUM_PARAMETERS, 1> Parameters;
 
   enum Status {
-    RUNNING,
-    // Resulting solution may be OK to use.
     GRADIENT_TOO_SMALL,            // eps > max(J'*f(x))
-    RELATIVE_STEP_SIZE_TOO_SMALL,  // eps > ||dx|| / ||x||
-    ERROR_TOO_SMALL,               // eps > ||f(x)||
+    RELATIVE_STEP_SIZE_TOO_SMALL,  // eps > ||dx|| / (||x|| + eps)
+    COST_TOO_SMALL,                // eps > ||f(x)||^2 / 2
     HIT_MAX_ITERATIONS,
 
-    // Numerical issues
-    FAILED_TO_EVALUATE_COST_FUNCTION,
-    FAILED_TO_SOLVER_LINEAR_SYSTEM,
+    // TODO(sameeragarwal): Deal with numerical failures.
   };
 
-  // TODO(keir): Some of these knobs can be derived from each other and
-  // removed, instead of requiring the user to set them.
-  // TODO(sameeragarwal): Make the parameters consistent with ceres
-  // TODO(sameeragarwal): Return a struct instead of just an enum.
   struct Options {
     Options()
-       : gradient_threshold(1e-16),
-         relative_step_threshold(1e-16),
-         error_threshold(1e-16),
-         initial_scale_factor(1e-4),
-         max_iterations(100) {}
-    Scalar gradient_threshold;       // eps > max(J'*f(x))
-    Scalar relative_step_threshold;  // eps > ||dx|| / ||x||
-    Scalar error_threshold;          // eps > ||f(x)||
-    Scalar initial_scale_factor;     // Initial u for solving normal equations.
-    int    max_iterations;           // Maximum number of solver iterations.
+        : gradient_tolerance(1e-10),
+          parameter_tolerance(1e-8),
+          cost_threshold(std::numeric_limits<Scalar>::epsilon()),
+          initial_trust_region_radius(1e4),
+          max_num_iterations(50) {}
+    Scalar gradient_tolerance;   // eps > max(J'*f(x))
+    Scalar parameter_tolerance;  // eps > ||dx|| / (||x|| + eps)
+    Scalar cost_threshold;       // eps > ||f(x)||^2 / 2
+    Scalar initial_trust_region_radius;
+    int max_num_iterations;
   };
 
   struct Summary {
     Summary()
         : initial_cost(-1),
           final_cost(-1),
-          gradient_magnitude(-1),
-          num_failed_linear_solves(0),
+          gradient_max_norm(-1),
           iterations(0),
-          status(RUNNING) {}
+          status(HIT_MAX_ITERATIONS) {}
 
-    Scalar initial_cost;        // 1/2 ||f(x)||^2
-    Scalar final_cost;          // 1/2 ||f(x)||^2
-    Scalar gradient_magnitude;  // ||J'f(x)||
-    int num_failed_linear_solves;
+    Scalar initial_cost;       // 1/2 ||f(x)||^2
+    Scalar final_cost;         // 1/2 ||f(x)||^2
+    Scalar gradient_max_norm;  // max(J'*f(x))
     int iterations;
     Status status;
   };
 
-  Status Update(const Function& function, const Parameters &x) {
-    // TODO(keir): Handle false return from the cost function.
-    function(&x(0), &error_(0), &jacobian_(0, 0));
+  bool Update(const Function& function, const Parameters &x) {
+    if (!function(x.data(), error_.data(), jacobian_.data())) {
+      return false;
+    }
+
     error_ = -error_;
 
     // On the first iteration, compute a diagonal (Jacobi) scaling
@@ -209,31 +201,37 @@
     jacobian_ = jacobian_ * jacobi_scaling_.asDiagonal();
     jtj_ = jacobian_.transpose() * jacobian_;
     g_ = jacobian_.transpose() * error_;
-    if (g_.array().abs().maxCoeff() < options.gradient_threshold) {
-      return GRADIENT_TOO_SMALL;
-    } else if (error_.norm() < options.error_threshold) {
-      return ERROR_TOO_SMALL;
-    }
-
-    return RUNNING;
+    summary.gradient_max_norm = g_.array().abs().maxCoeff();
+    cost_ = error_.squaredNorm() / 2;
+    return true;
   }
 
-  Summary& Solve(const Function& function, Parameters* x_and_min) {
+  const Summary& Solve(const Function& function, Parameters* x_and_min) {
     Initialize<NUM_RESIDUALS, NUM_PARAMETERS>(function);
     assert(x_and_min);
     Parameters& x = *x_and_min;
     summary = Summary();
 
     // TODO(sameeragarwal): Deal with failure here.
-    summary.status = Update(function, x);
-    summary.initial_cost = error_.squaredNorm() / Scalar(2.0);
+    Update(function, x);
+    summary.initial_cost = cost_;
+    summary.final_cost = cost_;
 
-    Scalar u = options.initial_scale_factor;
+    if (summary.gradient_max_norm < options.gradient_tolerance) {
+      summary.status = GRADIENT_TOO_SMALL;
+      return summary;
+    }
+
+    if (cost_ < options.cost_threshold) {
+      summary.status = COST_TOO_SMALL;
+      return summary;
+    }
+
+    Scalar u = 1.0 / options.initial_trust_region_radius;
     Scalar v = 2;
 
     for (summary.iterations = 1;
-         (summary.status == RUNNING &&
-          summary.iterations < options.max_iterations);
+         summary.iterations < options.max_num_iterations;
          summary.iterations++) {
       jtj_regularized_ = jtj_;
       const Scalar min_diagonal = 1e-6;
@@ -248,7 +246,13 @@
       linear_solver_.compute(jtj_regularized_);
       lm_step_ = linear_solver_.solve(g_);
       dx_ = jacobi_scaling_.asDiagonal() * lm_step_;
-      if (dx_.norm() < options.relative_step_threshold * x.norm()) {
+
+      // Adding parameter_tolerance to x.norm() ensures that this
+      // works if x is near zero.
+      const Scalar parameter_tolerance =
+          options.parameter_tolerance *
+          (x.norm() + options.parameter_tolerance);
+      if (dx_.norm() < parameter_tolerance) {
         summary.status = RELATIVE_STEP_SIZE_TOO_SMALL;
         break;
       }
@@ -258,13 +262,10 @@
       // functions.
       function(&x_new_[0], &f_x_new_[0], NULL);
 
-      // TODO(sameeragarwal): Maintain cost estimate for the iteration
-      // and save on a dot product here.
-      const double cost_change =
-          (error_.squaredNorm() - f_x_new_.squaredNorm());
+      const Scalar cost_change = (2 * cost_ - f_x_new_.squaredNorm());
 
       // TODO(sameeragarwal): Better more numerically stable evaluation.
-      const double model_cost_change = lm_step_.dot(2 * g_ - jtj_ * lm_step_);
+      const Scalar model_cost_change = lm_step_.dot(2 * g_ - jtj_ * lm_step_);
 
       // rho is the ratio of the actual reduction in error to the reduction
       // in error that would be obtained if the problem was linear. See [1]
@@ -274,7 +275,19 @@
         // Accept the Levenberg-Marquardt step because the linear
         // model fits well.
         x = x_new_;
-        summary.status = Update(function, x);
+
+        // TODO(sameeragarwal): Deal with failure.
+        Update(function, x);
+        if (summary.gradient_max_norm < options.gradient_tolerance) {
+          summary.status = GRADIENT_TOO_SMALL;
+          break;
+        }
+
+        if (cost_ < options.cost_threshold) {
+          summary.status = COST_TOO_SMALL;
+          break;
+        }
+
         Scalar tmp = Scalar(2 * rho - 1);
         u = u * std::max(1 / 3., 1 - tmp * tmp * tmp);
         v = 2;
@@ -288,12 +301,7 @@
       v *= 2;
     }
 
-    if (summary.status == RUNNING) {
-      summary.status = HIT_MAX_ITERATIONS;
-    }
-
-    summary.final_cost = error_.squaredNorm() / Scalar(2.0);
-    summary.gradient_magnitude = g_.norm();
+    summary.final_cost = cost_;
     return summary;
   }
 
@@ -304,15 +312,18 @@
   // Preallocate everything, including temporary storage needed for solving the
   // linear system. This allows reusing the intermediate storage across solves.
   LinearSolver linear_solver_;
+  Scalar cost_;
   Parameters dx_, x_new_, g_, jacobi_scaling_, lm_diagonal_, lm_step_;
   Eigen::Matrix<Scalar, NUM_RESIDUALS, 1> error_, f_x_new_;
   Eigen::Matrix<Scalar, NUM_RESIDUALS, NUM_PARAMETERS> jacobian_;
   Eigen::Matrix<Scalar, NUM_PARAMETERS, NUM_PARAMETERS> jtj_, jtj_regularized_;
 
   // The following definitions are needed for template metaprogramming.
-  template<bool Condition, typename T> struct enable_if;
+  template <bool Condition, typename T>
+  struct enable_if;
 
-  template<typename T> struct enable_if<true, T> {
+  template <typename T>
+  struct enable_if<true, T> {
     typedef T type;
   };
 
@@ -342,7 +353,7 @@
   // The number of parameters and residuals are statically sized.
   template <int R, int P>
   typename enable_if<(R != Eigen::Dynamic && P != Eigen::Dynamic), void>::type
-  Initialize(const Function& /* function */) { }
+  Initialize(const Function& /* function */) {}
 
   void Initialize(int num_residuals, int num_parameters) {
     dx_.resize(num_parameters);