// Ceres Solver - A fast non-linear least squares minimizer
// Copyright 2012 Google Inc. All rights reserved.
// http://code.google.com/p/ceres-solver/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
//   this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
//   this list of conditions and the following disclaimer in the documentation
//   and/or other materials provided with the distribution.
// * Neither the name of Google Inc. nor the names of its contributors may be
//   used to endorse or promote products derived from this software without
//   specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//
// Author: sameeragarwal@google.com (Sameer Agarwal)

#include "ceres/trust_region_minimizer.h"

#include <algorithm>
#include <cstdlib>
#include <cmath>
#include <cstring>
#include <string>
#include <vector>

#include "Eigen/Core"
#include "ceres/array_utils.h"
#include "ceres/evaluator.h"
#include "ceres/internal/eigen.h"
#include "ceres/internal/scoped_ptr.h"
#include "ceres/linear_least_squares_problems.h"
#include "ceres/sparse_matrix.h"
#include "ceres/trust_region_strategy.h"
#include "ceres/types.h"
#include "glog/logging.h"

namespace ceres {
namespace internal {
namespace {
// Small constant for various floating point issues.
const double kEpsilon = 1e-12;
}  // namespace

// Execute the list of IterationCallbacks sequentially. If any one of
// the callbacks does not return SOLVER_CONTINUE, then stop and return
// its status.
CallbackReturnType TrustRegionMinimizer::RunCallbacks(
    const IterationSummary& iteration_summary) {
  for (int i = 0; i < options_.callbacks.size(); ++i) {
    const CallbackReturnType status =
        (*options_.callbacks[i])(iteration_summary);
    if (status != SOLVER_CONTINUE) {
      return status;
    }
  }
  return SOLVER_CONTINUE;
}

// Compute a scaling vector that is used to improve the conditioning
// of the Jacobian.
void TrustRegionMinimizer::EstimateScale(const SparseMatrix& jacobian,
                                         double* scale) const {
  jacobian.SquaredColumnNorm(scale);
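  // SquaredColumnNorm fills scale with the squared Euclidean norm of
  // each Jacobian column; each entry is then inverted so that
  // scale[i] = 1 / (kEpsilon + ||J_i||), i.e. the scaled Jacobian has
  // columns of (roughly) unit norm. kEpsilon guards against division
  // by zero for columns that are identically zero.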
  for (int i = 0; i < jacobian.num_cols(); ++i) {
    scale[i] = 1.0 / (kEpsilon + sqrt(scale[i]));
  }
}

void TrustRegionMinimizer::Init(const Minimizer::Options& options) {
  options_ = options;
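  // The dump iterations are sorted so that binary_search can be used
  // in MaybeDumpLinearLeastSquaresProblem below.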
  sort(options_.lsqp_iterations_to_dump.begin(),
       options_.lsqp_iterations_to_dump.end());
}

bool TrustRegionMinimizer::MaybeDumpLinearLeastSquaresProblem(
    const int iteration,
    const SparseMatrix* jacobian,
    const double* residuals,
    const double* step) const {
  // TODO(sameeragarwal): Since the use of trust_region_radius has
  // moved inside TrustRegionStrategy, it is not clear how we dump
  // the regularization vector/matrix anymore.
  //
  // Doing this right requires either a change to the
  // TrustRegionStrategy API or a change to how LinearLeastSquares
  // problems are stored on disk.
  //
  // For now, we will just not dump the regularizer.
  return (!binary_search(options_.lsqp_iterations_to_dump.begin(),
                         options_.lsqp_iterations_to_dump.end(),
                         iteration) ||
          DumpLinearLeastSquaresProblem(options_.lsqp_dump_directory,
                                        iteration,
                                        options_.lsqp_dump_format_type,
                                        jacobian,
                                        NULL,
                                        residuals,
                                        step,
                                        options_.num_eliminate_blocks));
}

void TrustRegionMinimizer::Minimize(const Minimizer::Options& options,
                                    double* parameters,
                                    Solver::Summary* summary) {
  time_t start_time = time(NULL);
  time_t iteration_start_time = start_time;
  Init(options);

  summary->termination_type = NO_CONVERGENCE;
  summary->num_successful_steps = 0;
  summary->num_unsuccessful_steps = 0;

  Evaluator* evaluator = CHECK_NOTNULL(options_.evaluator);
  SparseMatrix* jacobian = CHECK_NOTNULL(options_.jacobian);
  TrustRegionStrategy* strategy = CHECK_NOTNULL(options_.trust_region_strategy);

  const int num_parameters = evaluator->NumParameters();
  const int num_effective_parameters = evaluator->NumEffectiveParameters();
  const int num_residuals = evaluator->NumResiduals();

  VectorRef x_min(parameters, num_parameters);
  Vector x = x_min;
  double x_norm = x.norm();

  Vector residuals(num_residuals);
  Vector trust_region_step(num_effective_parameters);
  Vector delta(num_effective_parameters);
  Vector x_plus_delta(num_parameters);
  Vector gradient(num_effective_parameters);
  Vector model_residuals(num_residuals);
  Vector scale(num_effective_parameters);

  IterationSummary iteration_summary;
  iteration_summary.iteration = 0;
  iteration_summary.step_is_valid = false;
  iteration_summary.step_is_successful = false;
  iteration_summary.cost = summary->initial_cost;
  iteration_summary.cost_change = 0.0;
  iteration_summary.gradient_max_norm = 0.0;
  iteration_summary.step_norm = 0.0;
  iteration_summary.relative_decrease = 0.0;
  iteration_summary.trust_region_radius = strategy->Radius();
  // TODO(sameeragarwal): Rename eta to linear_solver_accuracy or
  // something similar across the board.
  iteration_summary.eta = options_.eta;
  iteration_summary.linear_solver_iterations = 0;
  iteration_summary.step_solver_time_in_seconds = 0;

  // Do initial cost and Jacobian evaluation.
  double cost = 0.0;
  if (!evaluator->Evaluate(x.data(), &cost, residuals.data(), NULL, jacobian)) {
    LOG(WARNING) << "Terminating: Residual and Jacobian evaluation failed.";
    summary->termination_type = NUMERICAL_FAILURE;
    return;
  }

  int num_consecutive_nonmonotonic_steps = 0;
  double minimum_cost = cost;
  double reference_cost = cost;
  double accumulated_reference_model_cost_change = 0.0;
  double candidate_cost = cost;
  double accumulated_candidate_model_cost_change = 0.0;

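  // The gradient of the objective 1/2 |f(x)|^2 is J' * f. LeftMultiply
  // accumulates J' * residuals into gradient, which is why the vector
  // is zeroed first.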
  gradient.setZero();
  jacobian->LeftMultiply(residuals.data(), gradient.data());
  iteration_summary.gradient_max_norm = gradient.lpNorm<Eigen::Infinity>();

  if (options_.jacobi_scaling) {
    EstimateScale(*jacobian, scale.data());
    jacobian->ScaleColumns(scale.data());
  } else {
    scale.setOnes();
  }

  // The initial gradient max_norm is bounded from below so that we do
  // not divide by zero.
  const double gradient_max_norm_0 =
      max(iteration_summary.gradient_max_norm, kEpsilon);
  const double absolute_gradient_tolerance =
      options_.gradient_tolerance * gradient_max_norm_0;

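  // The gradient tolerance test is relative: terminate when the max
  // norm of the gradient has dropped below gradient_tolerance times
  // its value at the initial point.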
  if (iteration_summary.gradient_max_norm <= absolute_gradient_tolerance) {
    summary->termination_type = GRADIENT_TOLERANCE;
    VLOG(1) << "Terminating: Gradient tolerance reached. "
            << "Relative gradient max norm: "
            << iteration_summary.gradient_max_norm / gradient_max_norm_0
            << " <= " << options_.gradient_tolerance;
    return;
  }

  iteration_summary.iteration_time_in_seconds =
      time(NULL) - iteration_start_time;
  iteration_summary.cumulative_time_in_seconds = time(NULL) - start_time +
      summary->preprocessor_time_in_seconds;
  summary->iterations.push_back(iteration_summary);

  // Call the various callbacks.
  switch (RunCallbacks(iteration_summary)) {
    case SOLVER_TERMINATE_SUCCESSFULLY:
      summary->termination_type = USER_SUCCESS;
      VLOG(1) << "Terminating: User callback returned USER_SUCCESS.";
      return;
    case SOLVER_ABORT:
      summary->termination_type = USER_ABORT;
      VLOG(1) << "Terminating: User callback returned USER_ABORT.";
      return;
    case SOLVER_CONTINUE:
      break;
    default:
      LOG(FATAL) << "Unknown type of user callback status";
  }

  int num_consecutive_invalid_steps = 0;
  while (true) {
    iteration_start_time = time(NULL);
    if (iteration_summary.iteration >= options_.max_num_iterations) {
      summary->termination_type = NO_CONVERGENCE;
      VLOG(1) << "Terminating: Maximum number of iterations reached.";
      break;
    }

    const double total_solver_time = iteration_start_time - start_time +
        summary->preprocessor_time_in_seconds;
    if (total_solver_time >= options_.max_solver_time_in_seconds) {
      summary->termination_type = NO_CONVERGENCE;
      VLOG(1) << "Terminating: Maximum solver time reached.";
      break;
    }

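    // Start this iteration's summary from the previous one so that
    // fields which are not explicitly recomputed below retain their
    // values from the last iteration.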
    iteration_summary = IterationSummary();
    iteration_summary = summary->iterations.back();
    iteration_summary.iteration = summary->iterations.back().iteration + 1;
    iteration_summary.step_is_valid = false;
    iteration_summary.step_is_successful = false;

    const time_t strategy_start_time = time(NULL);
    TrustRegionStrategy::PerSolveOptions per_solve_options;
    per_solve_options.eta = options_.eta;
    TrustRegionStrategy::Summary strategy_summary =
        strategy->ComputeStep(per_solve_options,
                              jacobian,
                              residuals.data(),
                              trust_region_step.data());

    iteration_summary.step_solver_time_in_seconds =
        time(NULL) - strategy_start_time;
    iteration_summary.linear_solver_iterations =
        strategy_summary.num_iterations;

    if (!MaybeDumpLinearLeastSquaresProblem(iteration_summary.iteration,
                                            jacobian,
                                            residuals.data(),
                                            trust_region_step.data())) {
      LOG(FATAL) << "Tried writing linear least squares problem: "
                 << options.lsqp_dump_directory << " but failed.";
    }

    double new_model_cost = 0.0;
    if (strategy_summary.termination_type != FAILURE) {
      // new_model_cost = 1/2 |f + J * step|^2
      model_residuals = residuals;
      jacobian->RightMultiply(trust_region_step.data(), model_residuals.data());
      new_model_cost = model_residuals.squaredNorm() / 2.0;

      // In exact arithmetic new_model_cost can never exceed the
      // current cost, but poorly conditioned matrices can give rise
      // to situations where new_model_cost is larger than half the
      // squared norm of the residual vector. We allow a small
      // tolerance around the current cost and beyond that declare
      // the step to be invalid.
      if (cost < (new_model_cost - kEpsilon)) {
        VLOG(1) << "Invalid step: current_cost: " << cost
                << " new_model_cost " << new_model_cost;
      } else {
        iteration_summary.step_is_valid = true;
      }
    }

    if (!iteration_summary.step_is_valid) {
      // Invalid steps can happen for a number of reasons. We allow a
      // limited number of successive failures, and return with
      // NUMERICAL_FAILURE if this limit is exceeded.
      if (++num_consecutive_invalid_steps >=
          options_.max_num_consecutive_invalid_steps) {
        summary->termination_type = NUMERICAL_FAILURE;
        LOG(WARNING) << "Terminating. Number of successive invalid steps more "
                     << "than "
                     << "Solver::Options::max_num_consecutive_invalid_steps: "
                     << options_.max_num_consecutive_invalid_steps;
        return;
      }

      // We are going to try and reduce the trust region radius and
      // solve again. To do this, we are going to treat this iteration
      // as an unsuccessful iteration. Since the various callbacks are
      // still executed, we fill the iteration summary with data that
      // assumes a step of length zero and no progress.
      iteration_summary.cost = cost;
      iteration_summary.cost_change = 0.0;
      iteration_summary.gradient_max_norm =
          summary->iterations.back().gradient_max_norm;
      iteration_summary.step_norm = 0.0;
      iteration_summary.relative_decrease = 0.0;
      iteration_summary.eta = options_.eta;
    } else {
      // The step is numerically valid, so now we can judge its quality.
      num_consecutive_invalid_steps = 0;

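      // model_cost_change is the decrease predicted by the linearized
      // model, i.e. 1/2 |f|^2 - 1/2 |f + J * step|^2.
      //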
      // We allow some slop around 0, and clamp model_cost_change at
      // kEpsilon from below.
      //
      // There is probably a better way to do this, as it will cause
      // trouble for problems whose objective function is within
      // kEpsilon of zero.
      const double model_cost_change = max(kEpsilon, cost - new_model_cost);

      // Undo the Jacobian column scaling.
      delta = (trust_region_step.array() * scale.array()).matrix();
      iteration_summary.step_norm = delta.norm();

      // Convergence based on parameter_tolerance.
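      //
      // The step is considered negligible, and the solve converged,
      // when
      //
      //   |step| <= parameter_tolerance * (|x| + parameter_tolerance).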
      const double step_size_tolerance = options_.parameter_tolerance *
          (x_norm + options_.parameter_tolerance);
      if (iteration_summary.step_norm <= step_size_tolerance) {
        VLOG(1) << "Terminating. Parameter tolerance reached. "
                << "relative step_norm: "
                << iteration_summary.step_norm /
                    (x_norm + options_.parameter_tolerance)
                << " <= " << options_.parameter_tolerance;
        summary->termination_type = PARAMETER_TOLERANCE;
        return;
      }

      if (!evaluator->Plus(x.data(), delta.data(), x_plus_delta.data())) {
        summary->termination_type = NUMERICAL_FAILURE;
        LOG(WARNING) << "Terminating. Failed to compute "
                     << "Plus(x, delta, x_plus_delta).";
        return;
      }

      // Try this step.
      double new_cost;
      if (!evaluator->Evaluate(x_plus_delta.data(),
                               &new_cost,
                               NULL, NULL, NULL)) {
        summary->termination_type = NUMERICAL_FAILURE;
        LOG(WARNING) << "Terminating: Cost evaluation failed.";
        return;
      }

      VLOG(2) << "old cost: " << cost << " new cost: " << new_cost;
      iteration_summary.cost_change = cost - new_cost;
      const double absolute_function_tolerance =
          options_.function_tolerance * cost;
      if (fabs(iteration_summary.cost_change) < absolute_function_tolerance) {
        VLOG(1) << "Terminating. Function tolerance reached. "
                << "|cost_change|/cost: "
                << fabs(iteration_summary.cost_change) / cost
                << " <= " << options_.function_tolerance;
        summary->termination_type = FUNCTION_TOLERANCE;
        return;
      }

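      // relative_decrease is the classic trust region ratio: the
      // actual reduction in cost divided by the reduction predicted
      // by the linear model.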
      const double relative_decrease =
          iteration_summary.cost_change / model_cost_change;

      const double historical_relative_decrease =
          (reference_cost - new_cost) /
          (accumulated_reference_model_cost_change + model_cost_change);

      // If monotonic steps are being used, then the relative_decrease
      // is the usual ratio of the change in objective function value
      // divided by the change in model cost.
      //
      // If non-monotonic steps are allowed, then we take the maximum
      // of the relative_decrease and the
      // historical_relative_decrease, which measures the decrease
      // since a reference iteration. The model cost change is
      // estimated by accumulating the model cost changes since the
      // reference iteration. The historical relative_decrease offers
      // a boost to a step which is not too bad compared to the
      // reference iteration, allowing for non-monotonic steps.
      iteration_summary.relative_decrease =
          options.use_nonmonotonic_steps
          ? max(relative_decrease, historical_relative_decrease)
          : relative_decrease;

      iteration_summary.step_is_successful =
          iteration_summary.relative_decrease > options_.min_relative_decrease;

      if (iteration_summary.step_is_successful) {
        accumulated_candidate_model_cost_change += model_cost_change;
        accumulated_reference_model_cost_change += model_cost_change;
        if (relative_decrease <= options_.min_relative_decrease) {
          VLOG(2) << "Non-monotonic step! "
                  << " relative_decrease: " << relative_decrease
                  << " historical_relative_decrease: "
                  << historical_relative_decrease;
        }
      }
    }

    if (iteration_summary.step_is_successful) {
      ++summary->num_successful_steps;
      strategy->StepAccepted(iteration_summary.relative_decrease);
      x = x_plus_delta;
      x_norm = x.norm();

      // Step looks good, evaluate the residuals and Jacobian at this
      // point.
      if (!evaluator->Evaluate(x.data(),
                               &cost,
                               residuals.data(),
                               NULL,
                               jacobian)) {
        summary->termination_type = NUMERICAL_FAILURE;
        LOG(WARNING) << "Terminating: Residual and Jacobian evaluation failed.";
        return;
      }

      gradient.setZero();
      jacobian->LeftMultiply(residuals.data(), gradient.data());
      iteration_summary.gradient_max_norm = gradient.lpNorm<Eigen::Infinity>();

      if (iteration_summary.gradient_max_norm <= absolute_gradient_tolerance) {
        summary->termination_type = GRADIENT_TOLERANCE;
        VLOG(1) << "Terminating: Gradient tolerance reached. "
                << "Relative gradient max norm: "
                << iteration_summary.gradient_max_norm / gradient_max_norm_0
                << " <= " << options_.gradient_tolerance;
        return;
      }

      if (options_.jacobi_scaling) {
        jacobian->ScaleColumns(scale.data());
      }

      // Update the best, reference and candidate iterates.
      //
      // Based on algorithm 10.1.2 (page 357) of "Trust Region
      // Methods" by Conn, Gould & Toint, or equations 33-40 of
      // "Non-monotone trust-region algorithms for nonlinear
      // optimization subject to convex constraints" by Phil Toint,
      // Mathematical Programming, 77, 1997.
      if (cost < minimum_cost) {
        // A step that improves solution quality was found.
        x_min = x;
        minimum_cost = cost;
        // Set the candidate iterate to the current point.
        candidate_cost = cost;
        num_consecutive_nonmonotonic_steps = 0;
        accumulated_candidate_model_cost_change = 0.0;
      } else {
        ++num_consecutive_nonmonotonic_steps;
        if (cost > candidate_cost) {
          // The current iterate has a higher cost than the candidate
          // iterate. Set the candidate to this point.
          VLOG(2) << "Updating the candidate iterate to the current point.";
          candidate_cost = cost;
          accumulated_candidate_model_cost_change = 0.0;
        }

        // If too many consecutive non-monotonic steps have been
        // taken, reset the reference iterate so as to force the
        // algorithm to descend.
        //
        // This works because the candidate iterate has a cost greater
        // than minimum_cost but smaller than that of the reference
        // iterate.
        if (num_consecutive_nonmonotonic_steps ==
            options.max_consecutive_nonmonotonic_steps) {
          VLOG(2) << "Resetting the reference point to the candidate point";
          reference_cost = candidate_cost;
          accumulated_reference_model_cost_change =
              accumulated_candidate_model_cost_change;
        }
      }
    } else {
      ++summary->num_unsuccessful_steps;
      if (iteration_summary.step_is_valid) {
        strategy->StepRejected(iteration_summary.relative_decrease);
      } else {
        strategy->StepIsInvalid();
      }
    }

    iteration_summary.cost = cost + summary->fixed_cost;
    iteration_summary.trust_region_radius = strategy->Radius();
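    // If the trust region strategy has shrunk the radius below the
    // minimum allowed value, no further progress can be made and the
    // solve is treated as having converged to parameter tolerance.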
    if (iteration_summary.trust_region_radius <
        options_.min_trust_region_radius) {
      summary->termination_type = PARAMETER_TOLERANCE;
      VLOG(1) << "Terminating: Minimum trust region radius reached.";
      return;
    }

    iteration_summary.iteration_time_in_seconds =
        time(NULL) - iteration_start_time;
    iteration_summary.cumulative_time_in_seconds = time(NULL) - start_time +
        summary->preprocessor_time_in_seconds;
    summary->iterations.push_back(iteration_summary);

    switch (RunCallbacks(iteration_summary)) {
      case SOLVER_TERMINATE_SUCCESSFULLY:
        summary->termination_type = USER_SUCCESS;
        VLOG(1) << "Terminating: User callback returned USER_SUCCESS.";
        return;
      case SOLVER_ABORT:
        summary->termination_type = USER_ABORT;
        VLOG(1) << "Terminating: User callback returned USER_ABORT.";
        return;
      case SOLVER_CONTINUE:
        break;
      default:
        LOG(FATAL) << "Unknown type of user callback status";
    }
  }
}


}  // namespace internal
}  // namespace ceres