A complete refactoring of TrustRegionMinimizer.

1. Break up the monolithic loop in TrustRegionMinimizer::Minimize
   into a number of more easily described and analyzed subfunctions.
2. Break out the logic for evaluating the quality of a Trust Region
   step into its own object - TrustRegionStepEvaluator.

Change-Id: I08580ecac074cfd74c096cb8e4880cbda3d48296
diff --git a/include/ceres/iteration_callback.h b/include/ceres/iteration_callback.h
index 6bab004..db5d0ef 100644
--- a/include/ceres/iteration_callback.h
+++ b/include/ceres/iteration_callback.h
@@ -69,7 +69,7 @@
   // Step was numerically valid, i.e., all values are finite and the
   // step reduces the value of the linearized model.
   //
-  // Note: step_is_valid is false when iteration = 0.
+  // Note: step_is_valid is always true when iteration = 0.
   bool step_is_valid;
 
   // Step did not reduce the value of the objective function
@@ -77,7 +77,7 @@
   // acceptance criterion used by the non-monotonic trust region
   // algorithm.
   //
-  // Note: step_is_nonmonotonic is false when iteration = 0;
+  // Note: step_is_nonmonotonic is always false when iteration = 0;
   bool step_is_nonmonotonic;
 
   // Whether or not the minimizer accepted this step or not. If the
@@ -89,7 +89,7 @@
   // relative decrease is not sufficient, the algorithm may accept the
   // step and the step is declared successful.
   //
-  // Note: step_is_successful is false when iteration = 0.
+  // Note: step_is_successful is always true when iteration = 0.
   bool step_is_successful;
 
   // Value of the objective function.
diff --git a/internal/ceres/CMakeLists.txt b/internal/ceres/CMakeLists.txt
index 3a51309..a9fee1a 100644
--- a/internal/ceres/CMakeLists.txt
+++ b/internal/ceres/CMakeLists.txt
@@ -108,6 +108,7 @@
     triplet_sparse_matrix.cc
     trust_region_preprocessor.cc
     trust_region_minimizer.cc
+    trust_region_step_evaluator.cc
     trust_region_strategy.cc
     types.cc
     visibility.cc
diff --git a/internal/ceres/trust_region_minimizer.cc b/internal/ceres/trust_region_minimizer.cc
index 627430c..7a4a775 100644
--- a/internal/ceres/trust_region_minimizer.cc
+++ b/internal/ceres/trust_region_minimizer.cc
@@ -1,5 +1,5 @@
 // Ceres Solver - A fast non-linear least squares minimizer
-// Copyright 2015 Google Inc. All rights reserved.
+// Copyright 2016 Google Inc. All rights reserved.
 // http://ceres-solver.org/
 //
 // Redistribution and use in source and binary forms, with or without
@@ -43,681 +43,732 @@
 #include "ceres/coordinate_descent_minimizer.h"
 #include "ceres/evaluator.h"
 #include "ceres/file.h"
-#include "ceres/internal/eigen.h"
-#include "ceres/internal/scoped_ptr.h"
 #include "ceres/line_search.h"
-#include "ceres/linear_least_squares_problems.h"
-#include "ceres/sparse_matrix.h"
 #include "ceres/stringprintf.h"
-#include "ceres/trust_region_strategy.h"
 #include "ceres/types.h"
 #include "ceres/wall_time.h"
 #include "glog/logging.h"
 
+// Helper macro to simplify some of the control flow.
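+//
+// For example, RETURN_IF_ERROR_AND_LOG(IterationZero()) logs
+// solver_summary_->message and returns from the surrounding (void)
+// function if IterationZero() fails.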
+#define RETURN_IF_ERROR_AND_LOG(expr)                            \
+  do {                                                           \
+    if (!(expr)) {                                               \
+      LOG(ERROR) << "Terminating: " << solver_summary_->message; \
+      return;                                                    \
+    }                                                            \
+  } while (0)
+
 namespace ceres {
 namespace internal {
-namespace {
 
-LineSearch::Summary DoLineSearch(const Minimizer::Options& options,
-                                 const Vector& x,
-                                 const Vector& gradient,
-                                 const double cost,
-                                 const Vector& delta,
-                                 Evaluator* evaluator) {
-  LineSearchFunction line_search_function(evaluator);
+TrustRegionMinimizer::~TrustRegionMinimizer() {}
+
+void TrustRegionMinimizer::Minimize(const Minimizer::Options& options,
+                                    double* parameters,
+                                    Solver::Summary* solver_summary) {
+  start_time_ = WallTimeInSeconds();
+  iteration_start_time_ = start_time_;
+  Init(options, parameters, solver_summary);
+  RETURN_IF_ERROR_AND_LOG(IterationZero());
+
+  // Create the TrustRegionStepEvaluator. The construction needs to be
+  // delayed to this point because we need the cost for the starting
+  // point to initialize the step evaluator.
+  step_evaluator_.reset(new TrustRegionStepEvaluator(
+      x_cost_,
+      options_.use_nonmonotonic_steps
+          ? options_.max_consecutive_nonmonotonic_steps
+          : 0));
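+  // Note: when non-monotonic steps are not requested, 0 is passed for
+  // max_consecutive_nonmonotonic_steps, so the step evaluator is
+  // expected to reduce to the classical ratio test against the
+  // current cost.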
+
+  while (FinalizeIterationAndCheckIfMinimizerCanContinue()) {
+    iteration_start_time_ = WallTimeInSeconds();
+    iteration_summary_ = IterationSummary();
+    iteration_summary_.iteration =
+        solver_summary->iterations.back().iteration + 1;
+
+    RETURN_IF_ERROR_AND_LOG(ComputeTrustRegionStep());
+    if (!iteration_summary_.step_is_valid) {
+      RETURN_IF_ERROR_AND_LOG(HandleInvalidStep());
+      continue;
+    }
+
+    if (options_.is_constrained) {
+      // Use a projected line search to enforce the bounds constraints
+      // and improve the quality of the step.
+      DoLineSearch(x_, gradient_, x_cost_, &delta_);
+    }
+
+    ComputeCandidatePointAndEvaluateCost();
+    DoInnerIterationsIfNeeded();
+
+    if (ParameterToleranceReached()) {
+      return;
+    }
+
+    if (FunctionToleranceReached()) {
+      return;
+    }
+
+    if (IsStepSuccessful()) {
+      RETURN_IF_ERROR_AND_LOG(HandleSuccessfulStep());
+      continue;
+    }
+
+    HandleUnsuccessfulStep();
+  }
+}
+
+// Initialize the minimizer, allocate working space and set some of
+// the fields in the solver_summary.
+void TrustRegionMinimizer::Init(const Minimizer::Options& options,
+                                double* parameters,
+                                Solver::Summary* solver_summary) {
+  options_ = options;
+  sort(options_.trust_region_minimizer_iterations_to_dump.begin(),
+       options_.trust_region_minimizer_iterations_to_dump.end());
+
+  parameters_ = parameters;
+
+  solver_summary_ = solver_summary;
+  solver_summary_->termination_type = NO_CONVERGENCE;
+  solver_summary_->num_successful_steps = 0;
+  solver_summary_->num_unsuccessful_steps = 0;
+  solver_summary_->is_constrained = options.is_constrained;
+
+  evaluator_ = CHECK_NOTNULL(options_.evaluator.get());
+  jacobian_ = CHECK_NOTNULL(options_.jacobian.get());
+  strategy_ = CHECK_NOTNULL(options_.trust_region_strategy.get());
+
+  is_not_silent_ = !options.is_silent;
+  inner_iterations_are_enabled_ =
+      options.inner_iteration_minimizer.get() != NULL;
+  inner_iterations_were_useful_ = false;
+
+  num_parameters_ = evaluator_->NumParameters();
+  num_effective_parameters_ = evaluator_->NumEffectiveParameters();
+  num_residuals_ = evaluator_->NumResiduals();
+  num_consecutive_invalid_steps_ = 0;
+
+  x_ = ConstVectorRef(parameters_, num_parameters_);
+  x_norm_ = x_.norm();
+  residuals_.resize(num_residuals_);
+  trust_region_step_.resize(num_effective_parameters_);
+  delta_.resize(num_effective_parameters_);
+  candidate_x_.resize(num_parameters_);
+  gradient_.resize(num_effective_parameters_);
+  model_residuals_.resize(num_residuals_);
+  negative_gradient_.resize(num_effective_parameters_);
+  projected_gradient_step_.resize(num_parameters_);
+
+  // By default the scaling is one; if the user requests Jacobi scaling
+  // of the Jacobian, we will compute and overwrite this vector.
+  jacobian_scaling_ = Vector::Ones(num_effective_parameters_);
+
+  x_norm_ = -1;  // Invalid value
+  x_cost_ = std::numeric_limits<double>::max();
+  minimum_cost_ = x_cost_;
+  model_cost_change_ = 0.0;
+}
+
+// 1. Project the initial solution onto the feasible set if needed.
+// 2. Compute the initial cost, Jacobian and gradient.
+//
+// Return true if all computations can be performed successfully.
+bool TrustRegionMinimizer::IterationZero() {
+  iteration_summary_ = IterationSummary();
+  iteration_summary_.iteration = 0;
+  iteration_summary_.step_is_valid = false;
+  iteration_summary_.step_is_successful = false;
+  iteration_summary_.cost_change = 0.0;
+  iteration_summary_.gradient_max_norm = 0.0;
+  iteration_summary_.gradient_norm = 0.0;
+  iteration_summary_.step_norm = 0.0;
+  iteration_summary_.relative_decrease = 0.0;
+  iteration_summary_.eta = options_.eta;
+  iteration_summary_.linear_solver_iterations = 0;
+  iteration_summary_.step_solver_time_in_seconds = 0;
+
+  if (options_.is_constrained) {
+    delta_.setZero();
+    if (!evaluator_->Plus(x_.data(), delta_.data(), candidate_x_.data())) {
+      solver_summary_->message =
+          "Unable to project initial point onto the feasible set.";
+      solver_summary_->termination_type = FAILURE;
+      return false;
+    }
+
+    x_ = candidate_x_;
+    x_norm_ = x_.norm();
+  }
+
+  if (!EvaluateGradientAndJacobian()) {
+    return false;
+  }
+
+  solver_summary_->initial_cost = x_cost_ + solver_summary_->fixed_cost;
+  iteration_summary_.step_is_valid = true;
+  iteration_summary_.step_is_successful = true;
+  return true;
+}
+
+// For the current x_, compute
+//
+//  1. Cost
+//  2. Jacobian
+//  3. Gradient
+//  4. Scale the Jacobian if needed (and compute the scaling if we are
+//     in iteration zero).
+//  5. Compute the 2-norm and max norm of the projected gradient.
+//
+// Returns true if all computations could be performed
+// successfully. Any failures are considered fatal and the
+// Solver::Summary is updated to indicate this.
+bool TrustRegionMinimizer::EvaluateGradientAndJacobian() {
+  if (!evaluator_->Evaluate(x_.data(),
+                            &x_cost_,
+                            residuals_.data(),
+                            gradient_.data(),
+                            jacobian_)) {
+    solver_summary_->message = "Residual and Jacobian evaluation failed.";
+    solver_summary_->termination_type = FAILURE;
+    return false;
+  }
+
+  iteration_summary_.cost = x_cost_ + solver_summary_->fixed_cost;
+
+  if (options_.jacobi_scaling) {
+    if (iteration_summary_.iteration == 0) {
+      // Compute a scaling vector that is used to improve the
+      // conditioning of the Jacobian.
+      //
+      // jacobian_scaling_[i] = 1 / (1 + norm of the i-th column of J),
+      // i.e., approximately diag(J'J)^{-1/2}.
+      jacobian_->SquaredColumnNorm(jacobian_scaling_.data());
+      for (int i = 0; i < jacobian_->num_cols(); ++i) {
+        // Add one to the denominator to prevent division by zero.
+        jacobian_scaling_[i] = 1.0 / (1.0 + sqrt(jacobian_scaling_[i]));
+      }
+    }
+
+    // jacobian = jacobian * diag(jacobian_scaling_)
+    jacobian_->ScaleColumns(jacobian_scaling_.data());
+  }
+
+  // The gradient exists in the local tangent space. To account for
+  // the bounds constraints correctly, instead of just computing the
+  // norm of the gradient vector, we compute
+  //
+  // |Plus(x, -gradient) - x|
+  //
+  // Where the Plus operator lifts the negative gradient to the
+  // ambient space, adds it to x and projects it on the hypercube
+  // defined by the bounds.
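+  //
+  // For a purely Euclidean parameterization with no bounds,
+  // Plus(x, -gradient) = x - gradient, so this reduces to the usual
+  // norm of the gradient.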
+  negative_gradient_ = -gradient_;
+  if (!evaluator_->Plus(x_.data(),
+                        negative_gradient_.data(),
+                        projected_gradient_step_.data())) {
+    solver_summary_->message =
+        "projected_gradient_step = Plus(x, -gradient) failed.";
+    solver_summary_->termination_type = FAILURE;
+    return false;
+  }
+
+  iteration_summary_.gradient_max_norm =
+      (x_ - projected_gradient_step_).lpNorm<Eigen::Infinity>();
+  iteration_summary_.gradient_norm = (x_ - projected_gradient_step_).norm();
+  return true;
+}
+
+// 1. Add the final timing information to the iteration summary.
+// 2. Run the callbacks
+// 3. Check for termination based on
+//    a. Run time
+//    b. Iteration count
+//    c. Max norm of the gradient
+//    d. Size of the trust region radius.
+//
+// Returns true if the user did not terminate the solver and none of
+// these termination criteria are met.
+bool TrustRegionMinimizer::FinalizeIterationAndCheckIfMinimizerCanContinue() {
+  if (iteration_summary_.step_is_successful) {
+    ++solver_summary_->num_successful_steps;
+    if (x_cost_ < minimum_cost_) {
+      minimum_cost_ = x_cost_;
+      VectorRef(parameters_, num_parameters_) = x_;
+      iteration_summary_.step_is_nonmonotonic = false;
+    } else {
+      iteration_summary_.step_is_nonmonotonic = true;
+    }
+  } else {
+    ++solver_summary_->num_unsuccessful_steps;
+  }
+
+  iteration_summary_.trust_region_radius = strategy_->Radius();
+  iteration_summary_.iteration_time_in_seconds =
+      WallTimeInSeconds() - iteration_start_time_;
+  iteration_summary_.cumulative_time_in_seconds =
+      WallTimeInSeconds() - start_time_ +
+      solver_summary_->preprocessor_time_in_seconds;
+
+  solver_summary_->iterations.push_back(iteration_summary_);
+
+  if (!RunCallbacks(options_, iteration_summary_, solver_summary_)) {
+    return false;
+  }
+
+  if (MaxSolverTimeReached()) {
+    return false;
+  }
+
+  if (MaxSolverIterationsReached()) {
+    return false;
+  }
+
+  if (GradientToleranceReached()) {
+    return false;
+  }
+
+  if (MinTrustRegionRadiusReached()) {
+    return false;
+  }
+
+  return true;
+}
+
+// Compute the trust region step using the TrustRegionStrategy chosen
+// by the user.
+//
+// If the strategy returns with LINEAR_SOLVER_FATAL_ERROR, which
+// indicates an unrecoverable error, return false. This is the only
+// condition that returns false.
+//
+// If the strategy returns with LINEAR_SOLVER_FAILURE, which indicates
+// a numerical failure that could be recovered from by retrying
+// (e.g. by increasing the strength of the regularization), we set
+// iteration_summary_.step_is_valid to false and return true.
+//
+// In all other cases, we compute the decrease in the trust region
+// model problem. In exact arithmetic, this should always be
+// positive, but due to numerical problems in the TrustRegionStrategy
+// or round off error when computing the decrease, it may be
+// negative, in which case we again set
+// iteration_summary_.step_is_valid to false and return true.
+bool TrustRegionMinimizer::ComputeTrustRegionStep() {
+  const double strategy_start_time = WallTimeInSeconds();
+  iteration_summary_.step_is_valid = false;
+  TrustRegionStrategy::PerSolveOptions per_solve_options;
+  per_solve_options.eta = options_.eta;
+  if (find(options_.trust_region_minimizer_iterations_to_dump.begin(),
+           options_.trust_region_minimizer_iterations_to_dump.end(),
+           iteration_summary_.iteration) !=
+      options_.trust_region_minimizer_iterations_to_dump.end()) {
+    per_solve_options.dump_format_type =
+        options_.trust_region_problem_dump_format_type;
+    per_solve_options.dump_filename_base =
+        JoinPath(options_.trust_region_problem_dump_directory,
+                 StringPrintf("ceres_solver_iteration_%03d",
+                              iteration_summary_.iteration));
+  }
+
+  TrustRegionStrategy::Summary strategy_summary =
+      strategy_->ComputeStep(per_solve_options,
+                             jacobian_,
+                             residuals_.data(),
+                             trust_region_step_.data());
+
+  if (strategy_summary.termination_type == LINEAR_SOLVER_FATAL_ERROR) {
+    solver_summary_->message =
+        "Linear solver failed due to unrecoverable "
+        "non-numeric causes. Please see the error log for clues. ";
+    solver_summary_->termination_type = FAILURE;
+    return false;
+  }
+
+  iteration_summary_.step_solver_time_in_seconds =
+      WallTimeInSeconds() - strategy_start_time;
+  iteration_summary_.linear_solver_iterations = strategy_summary.num_iterations;
+
+  if (strategy_summary.termination_type == LINEAR_SOLVER_FAILURE) {
+    return true;
+  }
+
+  // new_model_cost
+  //  = 1/2 [f + J * step]^2
+  //  = 1/2 [ f'f + 2f'J * step + step' * J' * J * step ]
+  // model_cost_change
+  //  = cost - new_model_cost
+  //  = f'f/2  - 1/2 [ f'f + 2f'J * step + step' * J' * J * step]
+  //  = -f'J * step - step' * J' * J * step / 2
+  //  = -(J * step)'(f + J * step / 2)
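+  //
+  // As a scalar sanity check (illustration only): with f = 2, J = 1,
+  // step = -1, the model cost drops from 2^2 / 2 = 2 to
+  // (2 - 1)^2 / 2 = 0.5, and the formula gives
+  // -(1 * -1) * (2 + (-1) / 2) = 1.5, as expected.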
+  model_residuals_.setZero();
+  jacobian_->RightMultiply(trust_region_step_.data(), model_residuals_.data());
+  model_cost_change_ =
+      -model_residuals_.dot(residuals_ + model_residuals_ / 2.0);
+
+  // TODO(sameeragarwal)
+  //
+  //  1. What happens if model_cost_change_ = 0
+  //  2. What happens if -epsilon <= model_cost_change_ < 0 for some
+  //     small epsilon due to round off error.
+  iteration_summary_.step_is_valid = (model_cost_change_ > 0.0);
+  if (iteration_summary_.step_is_valid) {
+    // Undo the Jacobian column scaling.
+    delta_ = (trust_region_step_.array() * jacobian_scaling_.array()).matrix();
+    num_consecutive_invalid_steps_ = 0;
+  }
+
+  VLOG_IF(1, is_not_silent_ && !iteration_summary_.step_is_valid)
+      << "Invalid step: current_cost: " << x_cost_
+      << " absolute model cost change: " << model_cost_change_
+      << " relative model cost change: " << (model_cost_change_ / x_cost_);
+  return true;
+}
+
+// Invalid steps can happen for a number of reasons. We allow a
+// limited number of consecutive failures and return false if this
+// limit is exceeded.
+bool TrustRegionMinimizer::HandleInvalidStep() {
+  // TODO(sameeragarwal): Should we be returning FAILURE or
+  // NO_CONVERGENCE? The solution value is still usable in many cases,
+  // it is not clear if we should declare the solver a failure
+  // entirely. For example the case where model_cost_change ~ 0.0, but
+  // just slightly negative.
+  if (++num_consecutive_invalid_steps_ >=
+      options_.max_num_consecutive_invalid_steps) {
+    solver_summary_->message = StringPrintf(
+        "Number of consecutive invalid steps more "
+        "than Solver::Options::max_num_consecutive_invalid_steps: %d",
+        options_.max_num_consecutive_invalid_steps);
+    solver_summary_->termination_type = FAILURE;
+    return false;
+  }
+
+  strategy_->StepIsInvalid();
+
+  // We are going to try and reduce the trust region radius and
+  // solve again. To do this, we are going to treat this iteration
+  // as an unsuccessful iteration. Since the various callbacks are
+  // still executed, we are going to fill the iteration summary
+  // with data that assumes a step of length zero and no progress.
+  iteration_summary_.cost = x_cost_ + solver_summary_->fixed_cost;
+  iteration_summary_.cost_change = 0.0;
+  iteration_summary_.gradient_max_norm =
+      solver_summary_->iterations.back().gradient_max_norm;
+  iteration_summary_.gradient_norm =
+      solver_summary_->iterations.back().gradient_norm;
+  iteration_summary_.step_norm = 0.0;
+  iteration_summary_.relative_decrease = 0.0;
+  iteration_summary_.eta = options_.eta;
+  return true;
+}
+
+// Use the supplied coordinate descent minimizer to perform inner
+// iterations and compute the improvement due to them. The cost after
+// performing the inner iterations is stored in candidate_cost_.
+//
+// The optimization is performed with candidate_x_ as the starting
+// point, and if the optimization is successful, candidate_x_ will be
+// updated with the optimized parameters.
+void TrustRegionMinimizer::DoInnerIterationsIfNeeded() {
+  if (!inner_iterations_are_enabled_ ||
+      candidate_cost_ >= std::numeric_limits<double>::max()) {
+    return;
+  }
+
+  double inner_iteration_start_time = WallTimeInSeconds();
+  ++solver_summary_->num_inner_iteration_steps;
+  inner_iteration_x_ = candidate_x_;
+  Solver::Summary inner_iteration_summary;
+  options_.inner_iteration_minimizer->Minimize(
+      options_, inner_iteration_x_.data(), &inner_iteration_summary);
+  double inner_iteration_cost;
+  if (!evaluator_->Evaluate(
+          inner_iteration_x_.data(), &inner_iteration_cost, NULL, NULL, NULL)) {
+    VLOG_IF(2, is_not_silent_) << "Inner iteration failed.";
+    return;
+  }
+
+  VLOG_IF(2, is_not_silent_)
+      << "Inner iteration succeeded; Current cost: " << x_cost_
+      << " Trust region step cost: " << candidate_cost_
+      << " Inner iteration cost: " << inner_iteration_cost;
+
+  candidate_x_ = inner_iteration_x_;
+
+  // Normally, the quality of a trust region step is measured by
+  // the ratio
+  //
+  //              cost_change
+  //    r =    -----------------
+  //           model_cost_change
+  //
+  // All the change in the nonlinear objective is due to the trust
+  // region step so this ratio is a good measure of the quality of
+  // the trust region radius. However, when inner iterations are
+  // being used, cost_change includes the contribution of the
+  // inner iterations and it's not fair to credit it all to the
+  // trust region algorithm. So we change the ratio to be
+  //
+  //                              cost_change
+  //    r =    ------------------------------------------------
+  //           (model_cost_change + inner_iteration_cost_change)
+  //
+  // Practically we do this by increasing model_cost_change by
+  // inner_iteration_cost_change.
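+  //
+  // For example, if the trust region step reduced the cost from 10 to
+  // 9 and the inner iterations reduced it further to 7, then
+  // cost_change = 3 even though only 1 of it is due to the trust
+  // region step; adding inner_iteration_cost_change = 2 to
+  // model_cost_change keeps the ratio from over-crediting the trust
+  // region step.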
+
+  const double inner_iteration_cost_change =
+      candidate_cost_ - inner_iteration_cost;
+  model_cost_change_ += inner_iteration_cost_change;
+  inner_iterations_were_useful_ = inner_iteration_cost < x_cost_;
+  const double inner_iteration_relative_progress =
+      1.0 - inner_iteration_cost / candidate_cost_;
+
+  // Disable inner iterations once the relative improvement
+  // drops below tolerance.
+  inner_iterations_are_enabled_ =
+      (inner_iteration_relative_progress > options_.inner_iteration_tolerance);
+  VLOG_IF(2, is_not_silent_ && !inner_iterations_are_enabled_)
+      << "Disabling inner iterations. Progress : "
+      << inner_iteration_relative_progress;
+  candidate_cost_ = inner_iteration_cost;
+
+  solver_summary_->inner_iteration_time_in_seconds +=
+      WallTimeInSeconds() - inner_iteration_start_time;
+}
+
+// Perform a projected line search to improve the objective function
+// value along delta.
+//
+// TODO(sameeragarwal): The current implementation does not do
+// anything illegal but is incorrect and not terribly effective.
+//
+// https://github.com/ceres-solver/ceres-solver/issues/187
+void TrustRegionMinimizer::DoLineSearch(const Vector& x,
+                                        const Vector& gradient,
+                                        const double cost,
+                                        Vector* delta) {
+  LineSearchFunction line_search_function(evaluator_);
 
   LineSearch::Options line_search_options;
   line_search_options.is_silent = true;
   line_search_options.interpolation_type =
-      options.line_search_interpolation_type;
-  line_search_options.min_step_size = options.min_line_search_step_size;
+      options_.line_search_interpolation_type;
+  line_search_options.min_step_size = options_.min_line_search_step_size;
   line_search_options.sufficient_decrease =
-      options.line_search_sufficient_function_decrease;
+      options_.line_search_sufficient_function_decrease;
   line_search_options.max_step_contraction =
-      options.max_line_search_step_contraction;
+      options_.max_line_search_step_contraction;
   line_search_options.min_step_contraction =
-      options.min_line_search_step_contraction;
+      options_.min_line_search_step_contraction;
   line_search_options.max_num_iterations =
-      options.max_num_line_search_step_size_iterations;
+      options_.max_num_line_search_step_size_iterations;
   line_search_options.sufficient_curvature_decrease =
-      options.line_search_sufficient_curvature_decrease;
+      options_.line_search_sufficient_curvature_decrease;
   line_search_options.max_step_expansion =
-      options.max_line_search_step_expansion;
+      options_.max_line_search_step_expansion;
   line_search_options.function = &line_search_function;
 
   std::string message;
-  scoped_ptr<LineSearch> line_search(
-      CHECK_NOTNULL(LineSearch::Create(ceres::ARMIJO,
-                                       line_search_options,
-                                       &message)));
-  LineSearch::Summary summary;
-  line_search_function.Init(x, delta);
-  line_search->Search(1.0, cost, gradient.dot(delta), &summary);
-  return summary;
-}
+  scoped_ptr<LineSearch> line_search(CHECK_NOTNULL(
+      LineSearch::Create(ceres::ARMIJO, line_search_options, &message)));
+  LineSearch::Summary line_search_summary;
+  line_search_function.Init(x, *delta);
+  line_search->Search(1.0, cost, gradient.dot(*delta), &line_search_summary);
 
-}  // namespace
+  solver_summary_->num_line_search_steps += line_search_summary.num_iterations;
+  solver_summary_->line_search_cost_evaluation_time_in_seconds +=
+      line_search_summary.cost_evaluation_time_in_seconds;
+  solver_summary_->line_search_gradient_evaluation_time_in_seconds +=
+      line_search_summary.gradient_evaluation_time_in_seconds;
+  solver_summary_->line_search_polynomial_minimization_time_in_seconds +=
+      line_search_summary.polynomial_minimization_time_in_seconds;
+  solver_summary_->line_search_total_time_in_seconds +=
+      line_search_summary.total_time_in_seconds;
 
-// Compute a scaling vector that is used to improve the conditioning
-// of the Jacobian.
-void TrustRegionMinimizer::EstimateScale(const SparseMatrix& jacobian,
-                                         double* scale) const {
-  jacobian.SquaredColumnNorm(scale);
-  for (int i = 0; i < jacobian.num_cols(); ++i) {
-    scale[i] = 1.0 / (1.0 + sqrt(scale[i]));
+  if (line_search_summary.success) {
+    *delta *= line_search_summary.optimal_step_size;
   }
 }
 
-void TrustRegionMinimizer::Init(const Minimizer::Options& options) {
-  options_ = options;
-  sort(options_.trust_region_minimizer_iterations_to_dump.begin(),
-       options_.trust_region_minimizer_iterations_to_dump.end());
+bool TrustRegionMinimizer::MaxSolverTimeReached() {
+  const double total_solver_time =
+      WallTimeInSeconds() - start_time_ +
+      solver_summary_->preprocessor_time_in_seconds;
+  if (total_solver_time < options_.max_solver_time_in_seconds) {
+    return false;
+  }
+
+  solver_summary_->message = StringPrintf("Maximum solver time reached. "
+                                          "Total solver time: %e >= %e.",
+                                          total_solver_time,
+                                          options_.max_solver_time_in_seconds);
+  solver_summary_->termination_type = NO_CONVERGENCE;
+  VLOG_IF(1, is_not_silent_) << "Terminating: " << solver_summary_->message;
+  return true;
 }
 
-void TrustRegionMinimizer::Minimize(const Minimizer::Options& options,
-                                    double* parameters,
-                                    Solver::Summary* summary) {
-  double start_time = WallTimeInSeconds();
-  double iteration_start_time =  start_time;
-  Init(options);
+bool TrustRegionMinimizer::MaxSolverIterationsReached() {
+  if (iteration_summary_.iteration < options_.max_num_iterations) {
+    return false;
+  }
 
-  Evaluator* evaluator = CHECK_NOTNULL(options_.evaluator.get());
-  SparseMatrix* jacobian = CHECK_NOTNULL(options_.jacobian.get());
-  TrustRegionStrategy* strategy =
-      CHECK_NOTNULL(options_.trust_region_strategy.get());
+  solver_summary_->message =
+      StringPrintf("Maximum number of iterations reached. "
+                   "Number of iterations: %d.",
+                   iteration_summary_.iteration);
 
-  const bool is_not_silent = !options.is_silent;
+  solver_summary_->termination_type = NO_CONVERGENCE;
+  VLOG_IF(1, is_not_silent_) << "Terminating: " << solver_summary_->message;
+  return true;
+}
 
-  // If the problem is bounds constrained, then enable the use of a
-  // line search after the trust region step has been computed. This
-  // line search will automatically use a projected test point onto
-  // the feasible set, there by guaranteeing the feasibility of the
-  // final output.
+bool TrustRegionMinimizer::GradientToleranceReached() {
+  if (!iteration_summary_.step_is_successful ||
+      iteration_summary_.gradient_max_norm > options_.gradient_tolerance) {
+    return false;
+  }
+
+  solver_summary_->message = StringPrintf(
+      "Gradient tolerance reached. "
+      "Gradient max norm: %e <= %e",
+      iteration_summary_.gradient_max_norm,
+      options_.gradient_tolerance);
+  solver_summary_->termination_type = CONVERGENCE;
+  VLOG_IF(1, is_not_silent_) << "Terminating: " << solver_summary_->message;
+  return true;
+}
+
+bool TrustRegionMinimizer::MinTrustRegionRadiusReached() {
+  if (iteration_summary_.trust_region_radius >
+      options_.min_trust_region_radius) {
+    return false;
+  }
+
+  solver_summary_->message =
+      StringPrintf("Minimum trust region radius reached. "
+                   "Trust region radius: %e <= %e",
+                   iteration_summary_.trust_region_radius,
+                   options_.min_trust_region_radius);
+  solver_summary_->termination_type = CONVERGENCE;
+  VLOG_IF(1, is_not_silent_) << "Terminating: " << solver_summary_->message;
+  return true;
+}
+
+// Solver::Options::parameter_tolerance based convergence check.
+bool TrustRegionMinimizer::ParameterToleranceReached() {
+  iteration_summary_.step_norm = (x_ - candidate_x_).norm();
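+  // The tolerance below combines a relative term, parameter_tolerance
+  // * |x|, with an absolute floor of parameter_tolerance^2 for the
+  // case where x is close to the origin.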
+  const double step_size_tolerance =
+      options_.parameter_tolerance * (x_norm_ + options_.parameter_tolerance);
+
+  if (iteration_summary_.step_norm > step_size_tolerance) {
+    return false;
+  }
+
+  solver_summary_->message = StringPrintf(
+      "Parameter tolerance reached. "
+      "Relative step_norm: %e <= %e.",
+      (iteration_summary_.step_norm / (x_norm_ + options_.parameter_tolerance)),
+      options_.parameter_tolerance);
+  solver_summary_->termination_type = CONVERGENCE;
+  VLOG_IF(1, is_not_silent_) << "Terminating: " << solver_summary_->message;
+  return true;
+}
+
+// Solver::Options::function_tolerance based convergence check.
+bool TrustRegionMinimizer::FunctionToleranceReached() {
+  iteration_summary_.cost_change = x_cost_ - candidate_cost_;
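+  // The test below is relative: convergence is declared when
+  // |cost_change| <= function_tolerance * x_cost_.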
+  const double absolute_function_tolerance =
+      options_.function_tolerance * x_cost_;
+
+  if (fabs(iteration_summary_.cost_change) > absolute_function_tolerance) {
+    return false;
+  }
+
+  solver_summary_->message = StringPrintf(
+      "Function tolerance reached. "
+      "|cost_change|/cost: %e <= %e",
+      fabs(iteration_summary_.cost_change) / x_cost_,
+      options_.function_tolerance);
+  solver_summary_->termination_type = CONVERGENCE;
+  VLOG_IF(1, is_not_silent_) << "Terminating: " << solver_summary_->message;
+  return true;
+}
+
+// Compute candidate_x_ = Plus(x_, delta_)
+// Evaluate the cost of candidate_x_ as candidate_cost_.
+//
+// Failure to compute the step or the cost means that candidate_cost_
+// is set to std::numeric_limits<double>::max(). Unlike
+// EvaluateGradientAndJacobian, failure in this function is not fatal
+// as we are only computing and evaluating a candidate point, and if
+// for some reason we are unable to evaluate it, we consider it to be
+// a point with very high cost. This allows the user to deal with edge
+// cases/constraints as part of the LocalParameterization and
+// CostFunction objects.
+void TrustRegionMinimizer::ComputeCandidatePointAndEvaluateCost() {
+  if (!evaluator_->Plus(x_.data(), delta_.data(), candidate_x_.data())) {
+    LOG_IF(WARNING, is_not_silent_)
+        << "x_plus_delta = Plus(x, delta) failed. "
+        << "Treating it as a step with infinite cost";
+    candidate_cost_ = std::numeric_limits<double>::max();
+    return;
+  }
+
+  if (!evaluator_->Evaluate(
+          candidate_x_.data(), &candidate_cost_, NULL, NULL, NULL)) {
+    LOG_IF(WARNING, is_not_silent_)
+        << "Step failed to evaluate. "
+        << "Treating it as a step with infinite cost";
+    candidate_cost_ = std::numeric_limits<double>::max();
+  }
+}
+
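+// A step is deemed successful if it results in a sufficient relative
+// decrease as measured by the TrustRegionStepEvaluator, or if the
+// inner iterations produced a net decrease in the cost. When
+// non-monotonic steps are enabled, the decrease is measured against a
+// historical reference iterate rather than only the current cost.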
+bool TrustRegionMinimizer::IsStepSuccessful() {
+  iteration_summary_.relative_decrease =
+      step_evaluator_->StepQuality(candidate_cost_, model_cost_change_);
+
+  // In most cases, boosting the model_cost_change by the
+  // improvement caused by the inner iterations is fine, but it can
+  // be the case that the original trust region step was so bad that
+  // the resulting improvement in the cost was negative, and the
+  // change caused by the inner iterations was large enough to
+  // improve the step, but also to make the relative decrease quite
+  // small.
   //
-  // TODO(sameeragarwal): Make line search available more generally.
-  const bool use_line_search = options.is_constrained;
-
-  summary->termination_type = NO_CONVERGENCE;
-  summary->num_successful_steps = 0;
-  summary->num_unsuccessful_steps = 0;
-  summary->is_constrained = options.is_constrained;
-
-  const int num_parameters = evaluator->NumParameters();
-  const int num_effective_parameters = evaluator->NumEffectiveParameters();
-  const int num_residuals = evaluator->NumResiduals();
-
-  Vector residuals(num_residuals);
-  Vector trust_region_step(num_effective_parameters);
-  Vector delta(num_effective_parameters);
-  Vector x_plus_delta(num_parameters);
-  Vector gradient(num_effective_parameters);
-  Vector model_residuals(num_residuals);
-  Vector scale(num_effective_parameters);
-  Vector negative_gradient(num_effective_parameters);
-  Vector projected_gradient_step(num_parameters);
-
-  IterationSummary iteration_summary;
-  iteration_summary.iteration = 0;
-  iteration_summary.step_is_valid = false;
-  iteration_summary.step_is_successful = false;
-  iteration_summary.cost_change = 0.0;
-  iteration_summary.gradient_max_norm = 0.0;
-  iteration_summary.gradient_norm = 0.0;
-  iteration_summary.step_norm = 0.0;
-  iteration_summary.relative_decrease = 0.0;
-  iteration_summary.trust_region_radius = strategy->Radius();
-  iteration_summary.eta = options_.eta;
-  iteration_summary.linear_solver_iterations = 0;
-  iteration_summary.step_solver_time_in_seconds = 0;
-
-  VectorRef x_min(parameters, num_parameters);
-  Vector x = x_min;
-  // Project onto the feasible set.
-  if (options.is_constrained) {
-    delta.setZero();
-    if (!evaluator->Plus(x.data(), delta.data(), x_plus_delta.data())) {
-      summary->message =
-          "Unable to project initial point onto the feasible set.";
-      summary->termination_type = FAILURE;
-      LOG_IF(WARNING, is_not_silent) << "Terminating: " << summary->message;
-      return;
-    }
-    x_min = x_plus_delta;
-    x = x_plus_delta;
-  }
-
-  double x_norm = x.norm();
-
-  // Do initial cost and Jacobian evaluation.
-  double cost = 0.0;
-  if (!evaluator->Evaluate(x.data(),
-                           &cost,
-                           residuals.data(),
-                           gradient.data(),
-                           jacobian)) {
-    summary->message = "Residual and Jacobian evaluation failed.";
-    summary->termination_type = FAILURE;
-    LOG_IF(WARNING, is_not_silent) << "Terminating: " << summary->message;
-    return;
-  }
-
-  negative_gradient = -gradient;
-  if (!evaluator->Plus(x.data(),
-                       negative_gradient.data(),
-                       projected_gradient_step.data())) {
-    summary->message = "Unable to compute gradient step.";
-    summary->termination_type = FAILURE;
-    LOG(ERROR) << "Terminating: " << summary->message;
-    return;
-  }
-
-  summary->initial_cost = cost + summary->fixed_cost;
-  iteration_summary.cost = cost + summary->fixed_cost;
-  iteration_summary.gradient_max_norm =
-    (x - projected_gradient_step).lpNorm<Eigen::Infinity>();
-  iteration_summary.gradient_norm = (x - projected_gradient_step).norm();
-
-  if (iteration_summary.gradient_max_norm <= options.gradient_tolerance) {
-    summary->message = StringPrintf("Gradient tolerance reached. "
-                                    "Gradient max norm: %e <= %e",
-                                    iteration_summary.gradient_max_norm,
-                                    options_.gradient_tolerance);
-    summary->termination_type = CONVERGENCE;
-    VLOG_IF(1, is_not_silent) << "Terminating: " << summary->message;
-
-    // Ensure that there is an iteration summary object for iteration
-    // 0 in Summary::iterations.
-    iteration_summary.iteration_time_in_seconds =
-        WallTimeInSeconds() - iteration_start_time;
-    iteration_summary.cumulative_time_in_seconds =
-        WallTimeInSeconds() - start_time +
-        summary->preprocessor_time_in_seconds;
-    summary->iterations.push_back(iteration_summary);
-    return;
-  }
-
-  if (options_.jacobi_scaling) {
-    EstimateScale(*jacobian, scale.data());
-    jacobian->ScaleColumns(scale.data());
-  } else {
-    scale.setOnes();
-  }
-
-  iteration_summary.iteration_time_in_seconds =
-      WallTimeInSeconds() - iteration_start_time;
-  iteration_summary.cumulative_time_in_seconds =
-      WallTimeInSeconds() - start_time
-      + summary->preprocessor_time_in_seconds;
-  summary->iterations.push_back(iteration_summary);
-
-  int num_consecutive_nonmonotonic_steps = 0;
-  double minimum_cost = cost;
-  double reference_cost = cost;
-  double accumulated_reference_model_cost_change = 0.0;
-  double candidate_cost = cost;
-  double accumulated_candidate_model_cost_change = 0.0;
-  int num_consecutive_invalid_steps = 0;
-  bool inner_iterations_are_enabled =
-      options.inner_iteration_minimizer.get() != NULL;
-  while (true) {
-    bool inner_iterations_were_useful = false;
-    if (!RunCallbacks(options, iteration_summary, summary)) {
-      return;
-    }
-
-    iteration_start_time = WallTimeInSeconds();
-    if (iteration_summary.iteration >= options_.max_num_iterations) {
-      summary->message = "Maximum number of iterations reached.";
-      summary->termination_type = NO_CONVERGENCE;
-      VLOG_IF(1, is_not_silent) << "Terminating: " << summary->message;
-      return;
-    }
-
-    const double total_solver_time = iteration_start_time - start_time +
-        summary->preprocessor_time_in_seconds;
-    if (total_solver_time >= options_.max_solver_time_in_seconds) {
-      summary->message = "Maximum solver time reached.";
-      summary->termination_type = NO_CONVERGENCE;
-      VLOG_IF(1, is_not_silent) << "Terminating: " << summary->message;
-      return;
-    }
-
-    const double strategy_start_time = WallTimeInSeconds();
-    TrustRegionStrategy::PerSolveOptions per_solve_options;
-    per_solve_options.eta = options_.eta;
-    if (find(options_.trust_region_minimizer_iterations_to_dump.begin(),
-             options_.trust_region_minimizer_iterations_to_dump.end(),
-             iteration_summary.iteration) !=
-        options_.trust_region_minimizer_iterations_to_dump.end()) {
-      per_solve_options.dump_format_type =
-          options_.trust_region_problem_dump_format_type;
-      per_solve_options.dump_filename_base =
-          JoinPath(options_.trust_region_problem_dump_directory,
-                   StringPrintf("ceres_solver_iteration_%03d",
-                                iteration_summary.iteration));
-    } else {
-      per_solve_options.dump_format_type = TEXTFILE;
-      per_solve_options.dump_filename_base.clear();
-    }
-
-    TrustRegionStrategy::Summary strategy_summary =
-        strategy->ComputeStep(per_solve_options,
-                              jacobian,
-                              residuals.data(),
-                              trust_region_step.data());
-
-    if (strategy_summary.termination_type == LINEAR_SOLVER_FATAL_ERROR) {
-      summary->message =
-          "Linear solver failed due to unrecoverable "
-          "non-numeric causes. Please see the error log for clues. ";
-      summary->termination_type = FAILURE;
-      LOG_IF(WARNING, is_not_silent) << "Terminating: " << summary->message;
-      return;
-    }
-
-    iteration_summary = IterationSummary();
-    iteration_summary.iteration = summary->iterations.back().iteration + 1;
-    iteration_summary.step_solver_time_in_seconds =
-        WallTimeInSeconds() - strategy_start_time;
-    iteration_summary.linear_solver_iterations =
-        strategy_summary.num_iterations;
-    iteration_summary.step_is_valid = false;
-    iteration_summary.step_is_successful = false;
-
-    double model_cost_change = 0.0;
-    if (strategy_summary.termination_type != LINEAR_SOLVER_FAILURE) {
-      // new_model_cost
-      //  = 1/2 [f + J * step]^2
-      //  = 1/2 [ f'f + 2f'J * step + step' * J' * J * step ]
-      // model_cost_change
-      //  = cost - new_model_cost
-      //  = f'f/2  - 1/2 [ f'f + 2f'J * step + step' * J' * J * step]
-      //  = -f'J * step - step' * J' * J * step / 2
-      model_residuals.setZero();
-      jacobian->RightMultiply(trust_region_step.data(), model_residuals.data());
-      model_cost_change =
-          - model_residuals.dot(residuals + model_residuals / 2.0);
-
-      if (model_cost_change < 0.0) {
-        VLOG_IF(1, is_not_silent)
-            << "Invalid step: current_cost: " << cost
-            << " absolute difference " << model_cost_change
-            << " relative difference " << (model_cost_change / cost);
-      } else {
-        iteration_summary.step_is_valid = true;
-      }
-    }
-
-    if (!iteration_summary.step_is_valid) {
-      // Invalid steps can happen due to a number of reasons, and we
-      // allow a limited number of successive failures, and return with
-      // FAILURE if this limit is exceeded.
-      if (++num_consecutive_invalid_steps >=
-          options_.max_num_consecutive_invalid_steps) {
-        summary->message = StringPrintf(
-            "Number of successive invalid steps more "
-            "than Solver::Options::max_num_consecutive_invalid_steps: %d",
-            options_.max_num_consecutive_invalid_steps);
-        summary->termination_type = FAILURE;
-        LOG_IF(WARNING, is_not_silent) << "Terminating: " << summary->message;
-        return;
-      }
-
-      // We are going to try and reduce the trust region radius and
-      // solve again. To do this, we are going to treat this iteration
-      // as an unsuccessful iteration. Since the various callbacks are
-      // still executed, we are going to fill the iteration summary
-      // with data that assumes a step of length zero and no progress.
-      iteration_summary.cost = cost + summary->fixed_cost;
-      iteration_summary.cost_change = 0.0;
-      iteration_summary.gradient_max_norm =
-          summary->iterations.back().gradient_max_norm;
-      iteration_summary.gradient_norm =
-          summary->iterations.back().gradient_norm;
-      iteration_summary.step_norm = 0.0;
-      iteration_summary.relative_decrease = 0.0;
-      iteration_summary.eta = options_.eta;
-    } else {
-      // The step is numerically valid, so now we can judge its quality.
-      num_consecutive_invalid_steps = 0;
-
-      // Undo the Jacobian column scaling.
-      delta = (trust_region_step.array() * scale.array()).matrix();
-
-      // Try improving the step further by using an ARMIJO line
-      // search.
-      //
-      // TODO(sameeragarwal): What happens to trust region sizing as
-      // it interacts with the line search ?
-      if (use_line_search) {
-        const LineSearch::Summary line_search_summary =
-            DoLineSearch(options, x, gradient, cost, delta, evaluator);
-
-        // Iterations inside the line search algorithm are considered
-        // 'steps' in the broader context, to distinguish these inner
-        // iterations from from the outer iterations of the trust
-        // region minimizer The number of line search steps is the
-        // total number of inner line search iterations (or steps)
-        // across the entire minimization.
-        summary->num_line_search_steps += line_search_summary.num_iterations;
-        summary->line_search_cost_evaluation_time_in_seconds +=
-            line_search_summary.cost_evaluation_time_in_seconds;
-        summary->line_search_gradient_evaluation_time_in_seconds +=
-            line_search_summary.gradient_evaluation_time_in_seconds;
-        summary->line_search_polynomial_minimization_time_in_seconds +=
-            line_search_summary.polynomial_minimization_time_in_seconds;
-        summary->line_search_total_time_in_seconds +=
-            line_search_summary.total_time_in_seconds;
-
-        if (line_search_summary.success) {
-          delta *= line_search_summary.optimal_step_size;
-        }
-      }
-
-      double new_cost = std::numeric_limits<double>::max();
-      if (evaluator->Plus(x.data(), delta.data(), x_plus_delta.data())) {
-        if (!evaluator->Evaluate(x_plus_delta.data(),
-                                 &new_cost,
-                                 NULL,
-                                 NULL,
-                                 NULL)) {
-          LOG_IF(WARNING, is_not_silent)
-              << "Step failed to evaluate. "
-              << "Treating it as a step with infinite cost";
-          new_cost = std::numeric_limits<double>::max();
-        }
-      } else {
-        LOG_IF(WARNING, is_not_silent)
-            << "x_plus_delta = Plus(x, delta) failed. "
-            << "Treating it as a step with infinite cost";
-      }
-
-      if (new_cost < std::numeric_limits<double>::max()) {
-        // Check if performing an inner iteration will make it better.
-        if (inner_iterations_are_enabled) {
-          ++summary->num_inner_iteration_steps;
-          double inner_iteration_start_time = WallTimeInSeconds();
-          const double x_plus_delta_cost = new_cost;
-          Vector inner_iteration_x = x_plus_delta;
-          Solver::Summary inner_iteration_summary;
-          options.inner_iteration_minimizer->Minimize(options,
-                                                      inner_iteration_x.data(),
-                                                      &inner_iteration_summary);
-          if (!evaluator->Evaluate(inner_iteration_x.data(),
-                                   &new_cost,
-                                   NULL, NULL, NULL)) {
-            VLOG_IF(2, is_not_silent) << "Inner iteration failed.";
-            new_cost = x_plus_delta_cost;
-          } else {
-            x_plus_delta = inner_iteration_x;
-            // Boost the model_cost_change, since the inner iteration
-            // improvements are not accounted for by the trust region.
-            model_cost_change +=  x_plus_delta_cost - new_cost;
-            VLOG_IF(2, is_not_silent)
-                << "Inner iteration succeeded; Current cost: " << cost
-                << " Trust region step cost: " << x_plus_delta_cost
-                << " Inner iteration cost: " << new_cost;
-
-            inner_iterations_were_useful = new_cost < cost;
-
-            const double inner_iteration_relative_progress =
-                1.0 - new_cost / x_plus_delta_cost;
-            // Disable inner iterations once the relative improvement
-            // drops below tolerance.
-            inner_iterations_are_enabled =
-                (inner_iteration_relative_progress >
-                 options.inner_iteration_tolerance);
-            VLOG_IF(2, is_not_silent && !inner_iterations_are_enabled)
-                << "Disabling inner iterations. Progress : "
-                << inner_iteration_relative_progress;
-          }
-          summary->inner_iteration_time_in_seconds +=
-              WallTimeInSeconds() - inner_iteration_start_time;
-        }
-      }
-
-      iteration_summary.step_norm = (x - x_plus_delta).norm();
-
-      // Convergence based on parameter_tolerance.
-      const double step_size_tolerance =  options_.parameter_tolerance *
-          (x_norm + options_.parameter_tolerance);
-      if (iteration_summary.step_norm <= step_size_tolerance) {
-        summary->message =
-            StringPrintf("Parameter tolerance reached. "
-                         "Relative step_norm: %e <= %e.",
-                         (iteration_summary.step_norm /
-                          (x_norm + options_.parameter_tolerance)),
-                         options_.parameter_tolerance);
-        summary->termination_type = CONVERGENCE;
-        VLOG_IF(1, is_not_silent) << "Terminating: " << summary->message;
-        return;
-      }
-
-      iteration_summary.cost_change =  cost - new_cost;
-      const double absolute_function_tolerance =
-          options_.function_tolerance * cost;
-      if (fabs(iteration_summary.cost_change) <= absolute_function_tolerance) {
-        summary->message =
-            StringPrintf("Function tolerance reached. "
-                         "|cost_change|/cost: %e <= %e",
-                         fabs(iteration_summary.cost_change) / cost,
-                         options_.function_tolerance);
-        summary->termination_type = CONVERGENCE;
-        VLOG_IF(1, is_not_silent) << "Terminating: " << summary->message;
-        return;
-      }
-
-      const double relative_decrease =
-          iteration_summary.cost_change / model_cost_change;
-
-      const double historical_relative_decrease =
-          (reference_cost - new_cost) /
-          (accumulated_reference_model_cost_change + model_cost_change);
-
-      // If monotonic steps are being used, then the relative_decrease
-      // is the usual ratio of the change in objective function value
-      // divided by the change in model cost.
-      //
-      // If non-monotonic steps are allowed, then we take the maximum
-      // of the relative_decrease and the
-      // historical_relative_decrease, which measures the increase
-      // from a reference iteration. The model cost change is
-      // estimated by accumulating the model cost changes since the
-      // reference iteration. The historical relative_decrease offers
-      // a boost to a step which is not too bad compared to the
-      // reference iteration, allowing for non-monotonic steps.
-      iteration_summary.relative_decrease =
-          options.use_nonmonotonic_steps
-          ? std::max(relative_decrease, historical_relative_decrease)
-          : relative_decrease;
-
-      // Normally, the quality of a trust region step is measured by
-      // the ratio
-      //
-      //              cost_change
-      //    r =    -----------------
-      //           model_cost_change
-      //
-      // All the change in the nonlinear objective is due to the trust
-      // region step so this ratio is a good measure of the quality of
-      // the trust region radius. However, when inner iterations are
-      // being used, cost_change includes the contribution of the
-      // inner iterations and its not fair to credit it all to the
-      // trust region algorithm. So we change the ratio to be
-      //
-      //                              cost_change
-      //    r =    ------------------------------------------------
-      //           (model_cost_change + inner_iteration_cost_change)
-      //
-      // In most cases this is fine, but it can be the case that the
-      // change in solution quality due to inner iterations is so large
-      // and the trust region step is so bad, that this ratio can become
-      // quite small.
-      //
-      // This can cause the trust region loop to reject this step. To
-      // get around this, we expicitly check if the inner iterations
-      // led to a net decrease in the objective function value. If
-      // they did, we accept the step even if the trust region ratio
-      // is small.
-      //
-      // Notice that we do not just check that cost_change is positive
-      // which is a weaker condition and would render the
-      // min_relative_decrease threshold useless. Instead, we keep
-      // track of inner_iterations_were_useful, which is true only
-      // when inner iterations lead to a net decrease in the cost.
-      iteration_summary.step_is_successful =
-          (inner_iterations_were_useful ||
-           iteration_summary.relative_decrease >
-           options_.min_relative_decrease);
-
-      if (iteration_summary.step_is_successful) {
-        accumulated_candidate_model_cost_change += model_cost_change;
-        accumulated_reference_model_cost_change += model_cost_change;
-
-        if (!inner_iterations_were_useful &&
-            relative_decrease <= options_.min_relative_decrease) {
-          iteration_summary.step_is_nonmonotonic = true;
-          VLOG_IF(2, is_not_silent)
-              << "Non-monotonic step! "
-              << " relative_decrease: "
-              << relative_decrease
-              << " historical_relative_decrease: "
-              << historical_relative_decrease;
-        }
-      }
-    }
-
-    if (iteration_summary.step_is_successful) {
-      ++summary->num_successful_steps;
-      strategy->StepAccepted(iteration_summary.relative_decrease);
-
-      x = x_plus_delta;
-      x_norm = x.norm();
-
-      // Step looks good, evaluate the residuals and Jacobian at this
-      // point.
-      if (!evaluator->Evaluate(x.data(),
-                               &cost,
-                               residuals.data(),
-                               gradient.data(),
-                               jacobian)) {
-        summary->message = "Residual and Jacobian evaluation failed.";
-        summary->termination_type = FAILURE;
-        LOG_IF(WARNING, is_not_silent) << "Terminating: " << summary->message;
-        return;
-      }
-
-      negative_gradient = -gradient;
-      if (!evaluator->Plus(x.data(),
-                           negative_gradient.data(),
-                           projected_gradient_step.data())) {
-        summary->message =
-            "projected_gradient_step = Plus(x, -gradient) failed.";
-        summary->termination_type = FAILURE;
-        LOG(ERROR) << "Terminating: " << summary->message;
-        return;
-      }
-
-      iteration_summary.gradient_max_norm =
-          (x - projected_gradient_step).lpNorm<Eigen::Infinity>();
-      iteration_summary.gradient_norm = (x - projected_gradient_step).norm();
-
-      if (options_.jacobi_scaling) {
-        jacobian->ScaleColumns(scale.data());
-      }
-
-      // Update the best, reference and candidate iterates.
-      //
-      // Based on algorithm 10.1.2 (page 357) of "Trust Region
-      // Methods" by Conn Gould & Toint, or equations 33-40 of
-      // "Non-monotone trust-region algorithms for nonlinear
-      // optimization subject to convex constraints" by Phil Toint,
-      // Mathematical Programming, 77, 1997.
-      if (cost < minimum_cost) {
-        // A step that improves solution quality was found.
-        x_min = x;
-        minimum_cost = cost;
-        // Set the candidate iterate to the current point.
-        candidate_cost = cost;
-        num_consecutive_nonmonotonic_steps = 0;
-        accumulated_candidate_model_cost_change = 0.0;
-      } else {
-        ++num_consecutive_nonmonotonic_steps;
-        if (cost > candidate_cost) {
-          // The current iterate is has a higher cost than the
-          // candidate iterate. Set the candidate to this point.
-          VLOG_IF(2, is_not_silent)
-              << "Updating the candidate iterate to the current point.";
-          candidate_cost = cost;
-          accumulated_candidate_model_cost_change = 0.0;
-        }
-
-        // At this point we have made too many non-monotonic steps and
-        // we are going to reset the value of the reference iterate so
-        // as to force the algorithm to descend.
-        //
-        // This is the case because the candidate iterate has a value
-        // greater than minimum_cost but smaller than the reference
-        // iterate.
-        if (num_consecutive_nonmonotonic_steps ==
-            options.max_consecutive_nonmonotonic_steps) {
-          VLOG_IF(2, is_not_silent)
-              << "Resetting the reference point to the candidate point";
-          reference_cost = candidate_cost;
-          accumulated_reference_model_cost_change =
-              accumulated_candidate_model_cost_change;
-        }
-      }
-    } else {
-      ++summary->num_unsuccessful_steps;
-      if (iteration_summary.step_is_valid) {
-        strategy->StepRejected(iteration_summary.relative_decrease);
-      } else {
-        strategy->StepIsInvalid();
-      }
-    }
-
-    iteration_summary.cost = cost + summary->fixed_cost;
-    iteration_summary.trust_region_radius = strategy->Radius();
-    iteration_summary.iteration_time_in_seconds =
-        WallTimeInSeconds() - iteration_start_time;
-    iteration_summary.cumulative_time_in_seconds =
-        WallTimeInSeconds() - start_time
-        + summary->preprocessor_time_in_seconds;
-    summary->iterations.push_back(iteration_summary);
-
-    // If the step was successful, check for the gradient norm
-    // collapsing to zero, and if the step is unsuccessful then check
-    // if the trust region radius has collapsed to zero.
-    //
-    // For correctness (Number of IterationSummary objects, correct
-    // final cost, and state update) these convergence tests need to
-    // be performed at the end of the iteration.
-    if (iteration_summary.step_is_successful) {
-      // Gradient norm can only go down in successful steps.
-      if (iteration_summary.gradient_max_norm <= options.gradient_tolerance) {
-        summary->message = StringPrintf("Gradient tolerance reached. "
-                                        "Gradient max norm: %e <= %e",
-                                        iteration_summary.gradient_max_norm,
-                                        options_.gradient_tolerance);
-        summary->termination_type = CONVERGENCE;
-        VLOG_IF(1, is_not_silent) << "Terminating: " << summary->message;
-        return;
-      }
-    } else {
-      // Trust region radius can only go down if the step if
-      // unsuccessful.
-      if (iteration_summary.trust_region_radius <
-          options_.min_trust_region_radius) {
-        summary->message = "Termination. Minimum trust region radius reached.";
-        summary->termination_type = CONVERGENCE;
-        VLOG_IF(1, is_not_silent) << summary->message;
-        return;
-      }
-    }
-  }
+  // This can cause the trust region loop to reject this step. To
+  // get around this, we explicitly check if the inner iterations
+  // led to a net decrease in the objective function value. If
+  // they did, we accept the step even if the trust region ratio
+  // is small.
+  //
+  // Notice that we do not just check that cost_change is positive,
+  // which is a weaker condition and would render the
+  // min_relative_decrease threshold useless. Instead, we keep
+  // track of inner_iterations_were_useful, which is true only
+  // when inner iterations lead to a net decrease in the cost.
+  return (inner_iterations_were_useful_ ||
+          iteration_summary_.relative_decrease >
+              options_.min_relative_decrease);
 }
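+
+// How IsStepSuccessful and the two step handlers below are assumed to
+// be composed on each iteration (a minimal sketch, not the actual
+// Minimize loop):
+//
+//   if (IsStepSuccessful()) {
+//     if (!HandleSuccessfulStep()) {
+//       return;  // Evaluating the accepted point failed.
+//     }
+//   } else {
+//     HandleUnsuccessfulStep();
+//   }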
 
+bool TrustRegionMinimizer::HandleSuccessfulStep() {
+  x_ = candidate_x_;
+  x_norm_ = x_.norm();
+
+  if (!EvaluateGradientAndJacobian()) {
+    return false;
+  }
+
+  iteration_summary_.step_is_successful = true;
+  strategy_->StepAccepted(iteration_summary_.relative_decrease);
+  step_evaluator_->StepAccepted(candidate_cost_, model_cost_change_);
+  return true;
+}
+
+void TrustRegionMinimizer::HandleUnsuccessfulStep() {
+  iteration_summary_.step_is_successful = false;
+  strategy_->StepRejected(iteration_summary_.relative_decrease);
+  iteration_summary_.cost = candidate_cost_ + solver_summary_->fixed_cost;
+}
 
 }  // namespace internal
 }  // namespace ceres
diff --git a/internal/ceres/trust_region_minimizer.h b/internal/ceres/trust_region_minimizer.h
index ed52c26..ac4a6ed 100644
--- a/internal/ceres/trust_region_minimizer.h
+++ b/internal/ceres/trust_region_minimizer.h
@@ -1,5 +1,5 @@
 // Ceres Solver - A fast non-linear least squares minimizer
-// Copyright 2015 Google Inc. All rights reserved.
+// Copyright 2016 Google Inc. All rights reserved.
 // http://ceres-solver.org/
 //
 // Redistribution and use in source and binary forms, with or without
@@ -31,35 +31,106 @@
 #ifndef CERES_INTERNAL_TRUST_REGION_MINIMIZER_H_
 #define CERES_INTERNAL_TRUST_REGION_MINIMIZER_H_
 
+#include "ceres/internal/eigen.h"
+#include "ceres/internal/scoped_ptr.h"
 #include "ceres/minimizer.h"
 #include "ceres/solver.h"
+#include "ceres/sparse_matrix.h"
+#include "ceres/trust_region_step_evaluator.h"
+#include "ceres/trust_region_strategy.h"
 #include "ceres/types.h"
 
 namespace ceres {
 namespace internal {
 
-// Generic trust region minimization algorithm. The heavy lifting is
-// done by a TrustRegionStrategy object passed in as part of options.
+// Generic trust region minimization algorithm.
 //
 // For example usage, see SolverImpl::Minimize.
 class TrustRegionMinimizer : public Minimizer {
  public:
-  ~TrustRegionMinimizer() {}
+  ~TrustRegionMinimizer();
+
+  // This method is not thread safe.
   virtual void Minimize(const Minimizer::Options& options,
                         double* parameters,
-                        Solver::Summary* summary);
+                        Solver::Summary* solver_summary);
 
  private:
-  void Init(const Minimizer::Options& options);
-  void EstimateScale(const SparseMatrix& jacobian, double* scale) const;
-  bool MaybeDumpLinearLeastSquaresProblem(const int iteration,
-                                          const SparseMatrix* jacobian,
-                                          const double* residuals,
-                                          const double* step) const;
+  void Init(const Minimizer::Options& options,
+            double* parameters,
+            Solver::Summary* solver_summary);
+  bool IterationZero();
+  bool FinalizeIterationAndCheckIfMinimizerCanContinue();
+  bool ComputeTrustRegionStep();
+
+  bool EvaluateGradientAndJacobian();
+  void ComputeCandidatePointAndEvaluateCost();
+
+  void DoLineSearch(const Vector& x,
+                    const Vector& gradient,
+                    const double cost,
+                    Vector* delta);
+  void DoInnerIterationsIfNeeded();
+
+  bool ParameterToleranceReached();
+  bool FunctionToleranceReached();
+  bool GradientToleranceReached();
+  bool MaxSolverTimeReached();
+  bool MaxSolverIterationsReached();
+  bool MinTrustRegionRadiusReached();
+
+  bool IsStepSuccessful();
+  void HandleUnsuccessfulStep();
+  bool HandleSuccessfulStep();
+  bool HandleInvalidStep();
 
   Minimizer::Options options_;
+
+  // These pointers are shortcuts to objects passed to the
+  // TrustRegionMinimizer. The TrustRegionMinimizer does not own them.
+  double* parameters_;
+  Solver::Summary* solver_summary_;
+  Evaluator* evaluator_;
+  SparseMatrix* jacobian_;
+  TrustRegionStrategy* strategy_;
+
+  scoped_ptr<TrustRegionStepEvaluator> step_evaluator_;
+
+  bool is_not_silent_;
+  bool inner_iterations_are_enabled_;
+  bool inner_iterations_were_useful_;
+
+  // Summary of the current iteration.
+  IterationSummary iteration_summary_;
+
+  int num_parameters_;
+  int num_effective_parameters_;
+  int num_residuals_;
+
+  Vector delta_;
+  Vector gradient_;
+  Vector inner_iteration_x_;
+  Vector model_residuals_;
+  Vector negative_gradient_;
+  Vector projected_gradient_step_;
+  Vector residuals_;
+  Vector trust_region_step_;
+  Vector x_;
+  Vector candidate_x_;
+  Vector jacobian_scaling_;
+
+  double x_norm_;
+  double x_cost_;
+  double minimum_cost_;
+  double model_cost_change_;
+  double candidate_cost_;
+
+  double start_time_;
+  double iteration_start_time_;
+  int num_consecutive_invalid_steps_;
 };
 
 }  // namespace internal
 }  // namespace ceres
+
 #endif  // CERES_INTERNAL_TRUST_REGION_MINIMIZER_H_
diff --git a/internal/ceres/trust_region_step_evaluator.cc b/internal/ceres/trust_region_step_evaluator.cc
new file mode 100644
index 0000000..c9167e6
--- /dev/null
+++ b/internal/ceres/trust_region_step_evaluator.cc
@@ -0,0 +1,107 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2016 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+
+#include <algorithm>
+#include "ceres/trust_region_step_evaluator.h"
+#include "glog/logging.h"
+
+namespace ceres {
+namespace internal {
+
+TrustRegionStepEvaluator::TrustRegionStepEvaluator(
+    const double initial_cost,
+    const int max_consecutive_nonmonotonic_steps)
+    : max_consecutive_nonmonotonic_steps_(max_consecutive_nonmonotonic_steps),
+      minimum_cost_(initial_cost),
+      current_cost_(initial_cost),
+      reference_cost_(initial_cost),
+      candidate_cost_(initial_cost),
+      accumulated_reference_model_cost_change_(0.0),
+      accumulated_candidate_model_cost_change_(0.0),
+      num_consecutive_nonmonotonic_steps_(0) {
+}
+
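+// StepQuality returns the larger of the classic relative decrease and
+// the "historical" relative decrease measured against the reference
+// iterate. A purely illustrative example: with current_cost_ = 10,
+// cost = 9.8, model_cost_change = 1, reference_cost_ = 12 and
+// accumulated_reference_model_cost_change_ = 4, the classic ratio is
+// (10 - 9.8) / 1 = 0.2, while the historical ratio is
+// (12 - 9.8) / (4 + 1) = 0.44, so StepQuality returns 0.44 and the
+// non-monotonic window can rescue a step that looks poor locally.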
+double TrustRegionStepEvaluator::StepQuality(
+    const double cost,
+    const double model_cost_change) const {
+  const double relative_decrease = (current_cost_ - cost) / model_cost_change;
+  const double historical_relative_decrease =
+      (reference_cost_ - cost) /
+      (accumulated_reference_model_cost_change_ + model_cost_change);
+  return std::max(relative_decrease, historical_relative_decrease);
+}
+
+void TrustRegionStepEvaluator::StepAccepted(
+    const double cost,
+    const double model_cost_change) {
+  // Algorithm 10.1.2 from Trust Region Methods by Conn, Gould &
+  // Toint.
+  //
+  // Step 3a
+  current_cost_ = cost;
+  accumulated_candidate_model_cost_change_ += model_cost_change;
+  accumulated_reference_model_cost_change_ += model_cost_change;
+
+  // Step 3b.
+  if (current_cost_ < minimum_cost_) {
+    minimum_cost_ = current_cost_;
+    num_consecutive_nonmonotonic_steps_ = 0;
+    candidate_cost_ = current_cost_;
+    accumulated_candidate_model_cost_change_ = 0.0;
+  } else {
+    // Step 3c.
+    ++num_consecutive_nonmonotonic_steps_;
+    if (current_cost_ > candidate_cost_) {
+      candidate_cost_ = current_cost_;
+      accumulated_candidate_model_cost_change_ = 0.0;
+    }
+  }
+
+  // Step 3d.
+  //
+  // At this point we have made too many non-monotonic steps and
+  // we are going to reset the value of the reference iterate so
+  // as to force the algorithm to descend.
+  //
+  // Note: In the original algorithm by Toint, this step was only
+  // executed if the step was non-monotonic, but that would not handle
+// the case of max_consecutive_nonmonotonic_steps = 0. The small
+// modification of always executing this step handles that corner
+// case correctly.
+  if (num_consecutive_nonmonotonic_steps_ ==
+      max_consecutive_nonmonotonic_steps_) {
+    reference_cost_ = candidate_cost_;
+    accumulated_reference_model_cost_change_ =
+        accumulated_candidate_model_cost_change_;
+  }
+}
+
+}  // namespace internal
+}  // namespace ceres
diff --git a/internal/ceres/trust_region_step_evaluator.h b/internal/ceres/trust_region_step_evaluator.h
new file mode 100644
index 0000000..1f14954
--- /dev/null
+++ b/internal/ceres/trust_region_step_evaluator.h
@@ -0,0 +1,98 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2016 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+
+#ifndef CERES_INTERNAL_TRUST_REGION_STEP_EVALUATOR_H_
+#define CERES_INTERNAL_TRUST_REGION_STEP_EVALUATOR_H_
+
+namespace ceres {
+namespace internal {
+
+// The job of the TrustRegionStepEvaluator is to evaluate the quality
+// of a step, i.e., how the change in the cost of the objective
+// function compares with the reduction in the objective of the trust
+// region problem (the model cost change).
+//
+// Classic trust region methods are descent methods, in that they only
+// accept a point if it strictly reduces the value of the objective
+// function. They do this by measuring the quality of a step as
+//
+//   cost_change / model_cost_change.
+//
+// Relaxing the monotonic descent requirement allows the algorithm to
+// be more efficient in the long term at the cost of some local
+// increase in the value of the objective function.
+//
+// This is because allowing for non-decreasing objective function
+// values in a principled manner allows the algorithm to "jump over
+// boulders", as the method is not restricted to moving only into
+// narrow valleys, while still preserving its convergence properties.
+//
+// The parameter max_consecutive_nonmonotonic_steps controls the
+// window size used by the step selection algorithm to accept
+// non-monotonic steps. Setting this parameter to zero, recovers the
+// classic montonic descent algorithm.
+//
+// Based on algorithm 10.1.2 (page 357) of "Trust Region
+// Methods" by Conn Gould & Toint, or equations 33-40 of
+// "Non-monotone trust-region algorithms for nonlinear
+// optimization subject to convex constraints" by Phil Toint,
+// Mathematical Programming, 77, 1997.
+//
+// Example usage:
+//
+// TrustRegionStepEvaluator* step_evaluator = ...
+//
+// cost = ... // Compute the non-linear objective function value.
+// model_cost_change = ... // Change in the value of the trust region objective.
+// if (step_evaluator->StepQuality(cost, model_cost_change) > threshold) {
+//   x = x + delta;
+//   step_evaluator->StepAccepted(cost, model_cost_change);
+// }
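+//
+// If the quality falls below the threshold, the step is rejected and
+// StepAccepted is not called, so the evaluator's reference state is
+// left unchanged (the caller would typically shrink the trust region
+// instead).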
+class TrustRegionStepEvaluator {
+ public:
+  TrustRegionStepEvaluator(double initial_cost,
+                           int max_consecutive_nonmonotonic_steps);
+  double StepQuality(double cost, double model_cost_change) const;
+  void StepAccepted(double cost, double model_cost_change);
+
+ private:
+  const int max_consecutive_nonmonotonic_steps_;
+  double minimum_cost_;
+  double current_cost_;
+  double reference_cost_;
+  double candidate_cost_;
+  double accumulated_reference_model_cost_change_;
+  double accumulated_candidate_model_cost_change_;
+  int num_consecutive_nonmonotonic_steps_;
+};
+
+}  // namespace internal
+}  // namespace ceres
+
+#endif  // CERES_INTERNAL_TRUST_REGION_STEP_EVALUATOR_H_
diff --git a/internal/ceres/trust_region_strategy.h b/internal/ceres/trust_region_strategy.h
index 9560e67..36e8e98 100644
--- a/internal/ceres/trust_region_strategy.h
+++ b/internal/ceres/trust_region_strategy.h
@@ -86,20 +86,20 @@
   struct PerSolveOptions {
     PerSolveOptions()
         : eta(0),
-          dump_filename_base(""),
           dump_format_type(TEXTFILE) {
     }
 
     // Forcing sequence for inexact solves.
     double eta;
 
+    DumpFormatType dump_format_type;
+
     // If non-empty and dump_format_type is not CONSOLE, the trust
     // regions strategy will write the linear system to file(s) with
     // name starting with dump_filename_base.  If dump_format_type is
     // CONSOLE then dump_filename_base will be ignored and the linear
     // system will be written to the standard error.
     std::string dump_filename_base;
-    DumpFormatType dump_format_type;
   };
 
   struct Summary {