Pipe minimizer termination messages to Solver::Summary. All minimizer termination messages are now available as Solver::Summary::error. This is part of the ongoing refactoring of minimizer error reporting.

Change-Id: I4514c3c042645bbd1471bcde9bd3dbf81d9ee8b0
diff --git a/internal/ceres/line_search_minimizer.cc b/internal/ceres/line_search_minimizer.cc index 1093bad..4590afe 100644 --- a/internal/ceres/line_search_minimizer.cc +++ b/internal/ceres/line_search_minimizer.cc
@@ -126,9 +126,9 @@ // Do initial cost and Jacobian evaluation. if (!Evaluate(evaluator, x, &current_state)) { - LOG_IF(WARNING, is_not_silent) - << "Terminating: Cost and gradient evaluation failed."; + summary->error = "Terminating: Cost and gradient evaluation failed."; summary->termination_type = NUMERICAL_FAILURE; + LOG_IF(WARNING, is_not_silent) << summary->error; return; } @@ -146,12 +146,14 @@ options.gradient_tolerance * initial_gradient_max_norm; if (iteration_summary.gradient_max_norm <= absolute_gradient_tolerance) { - VLOG_IF(1, is_not_silent) - << "Terminating: Gradient tolerance reached." - << "Relative gradient max norm: " - << iteration_summary.gradient_max_norm / initial_gradient_max_norm - << " <= " << options.gradient_tolerance; + summary->error = + StringPrintf("Terminating: Gradient tolerance reached. " "Relative gradient max norm: %e <= %e", + iteration_summary.gradient_max_norm / + initial_gradient_max_norm, + options.gradient_tolerance); summary->termination_type = GRADIENT_TOLERANCE; + VLOG_IF(1, is_not_silent) << summary->error; return; } @@ -198,10 +200,8 @@ line_search_options, &summary->error)); if (line_search.get() == NULL) { - LOG_IF(ERROR, is_not_silent) - << "Ceres bug: Unable to create a LineSearch object, please " - << "contact the developers!, error: " << summary->error; summary->termination_type = DID_NOT_RUN; + LOG_IF(ERROR, is_not_silent) << summary->error; return; } @@ -215,17 +215,18 @@ iteration_start_time = WallTimeInSeconds(); if (iteration_summary.iteration >= options.max_num_iterations) { - VLOG_IF(1, is_not_silent) - << "Terminating: Maximum number of iterations reached."; + summary->error = "Terminating: Maximum number of iterations reached."; summary->termination_type = NO_CONVERGENCE; + VLOG_IF(1, is_not_silent) << summary->error; break; } const double total_solver_time = iteration_start_time - start_time + summary->preprocessor_time_in_seconds; if (total_solver_time >= options.max_solver_time_in_seconds) { - 
VLOG_IF(1, is_not_silent) << "Terminating: Maximum solver time reached."; + summary->error = "Terminating: Maximum solver time reached."; summary->termination_type = NO_CONVERGENCE; + VLOG_IF(1, is_not_silent) << summary->error; break; } @@ -251,12 +252,11 @@ // have already reached our specified maximum number of restarts, // terminate optimization. summary->error = - StringPrintf("Line search direction failure: specified " + StringPrintf("Termination: Line search direction failure: specified " "max_num_line_search_direction_restarts: %d reached.", options.max_num_line_search_direction_restarts); - LOG_IF(WARNING, is_not_silent) << summary->error - << " terminating optimization."; summary->termination_type = NUMERICAL_FAILURE; + LOG_IF(WARNING, is_not_silent) << summary->error; break; } else if (!line_search_status) { // Restart line search direction with gradient descent on first iteration @@ -305,8 +305,8 @@ "(current_cost - previous_cost): %.5e", initial_step_size, current_state.directional_derivative, (current_state.cost - previous_state.cost)); - LOG_IF(WARNING, is_not_silent) << summary->error; summary->termination_type = NUMERICAL_FAILURE; + LOG_IF(WARNING, is_not_silent) << summary->error; break; } @@ -355,13 +355,14 @@ iteration_summary.gradient_norm = sqrt(current_state.gradient_squared_norm); if (iteration_summary.gradient_max_norm <= absolute_gradient_tolerance) { - VLOG_IF(1, is_not_silent) - << "Terminating: Gradient tolerance reached." - << "Relative gradient max norm: " - << (iteration_summary.gradient_max_norm / - initial_gradient_max_norm) - << " <= " << options.gradient_tolerance; + summary->error = + StringPrintf("Terminating: Gradient tolerance reached. " + "Relative gradient max norm: %e <= %e. 
", + (iteration_summary.gradient_max_norm / + initial_gradient_max_norm), + options.gradient_tolerance); summary->termination_type = GRADIENT_TOLERANCE; + VLOG_IF(1, is_not_silent) << summary->error; break; } @@ -369,12 +370,14 @@ const double absolute_function_tolerance = options.function_tolerance * previous_state.cost; if (fabs(iteration_summary.cost_change) < absolute_function_tolerance) { - VLOG_IF(1, is_not_silent) - << "Terminating. Function tolerance reached. " - << "|cost_change|/cost: " - << fabs(iteration_summary.cost_change) / previous_state.cost - << " <= " << options.function_tolerance; + summary->error = + StringPrintf("Terminating. Function tolerance reached. " + "|cost_change|/cost: %e <= %e", + fabs(iteration_summary.cost_change) / + previous_state.cost, + options.function_tolerance); summary->termination_type = FUNCTION_TOLERANCE; + VLOG_IF(1, is_not_silent) << summary->error; return; }
diff --git a/internal/ceres/trust_region_minimizer.cc b/internal/ceres/trust_region_minimizer.cc index e09c008..9b630e1 100644 --- a/internal/ceres/trust_region_minimizer.cc +++ b/internal/ceres/trust_region_minimizer.cc
@@ -117,8 +117,6 @@ iteration_summary.step_norm = 0.0; iteration_summary.relative_decrease = 0.0; iteration_summary.trust_region_radius = strategy->Radius(); - // TODO(sameeragarwal): Rename eta to linear_solver_accuracy or - // something similar across the board. iteration_summary.eta = options_.eta; iteration_summary.linear_solver_iterations = 0; iteration_summary.step_solver_time_in_seconds = 0; @@ -130,9 +128,9 @@ residuals.data(), gradient.data(), jacobian)) { - LOG_IF(WARNING, is_not_silent) - << "Terminating: Residual and Jacobian evaluation failed."; + summary->error = "Terminating: Residual and Jacobian evaluation failed."; summary->termination_type = NUMERICAL_FAILURE; + LOG_IF(WARNING, is_not_silent) << summary->error; return; } @@ -156,12 +154,13 @@ options_.gradient_tolerance * initial_gradient_max_norm; if (iteration_summary.gradient_max_norm <= absolute_gradient_tolerance) { - VLOG_IF(1, is_not_silent) << "Terminating: Gradient tolerance reached." - << "Relative gradient max norm: " - << (iteration_summary.gradient_max_norm / - initial_gradient_max_norm) - << " <= " << options_.gradient_tolerance; + summary->error = StringPrintf("Terminating: Gradient tolerance reached. 
" + "Relative gradient max norm: %e <= %e", + (iteration_summary.gradient_max_norm / + initial_gradient_max_norm), + options_.gradient_tolerance); summary->termination_type = GRADIENT_TOLERANCE; + VLOG_IF(1, is_not_silent) << summary->error; return; } @@ -189,18 +188,18 @@ iteration_start_time = WallTimeInSeconds(); if (iteration_summary.iteration >= options_.max_num_iterations) { - VLOG_IF(1, is_not_silent) - << "Terminating: Maximum number of iterations reached."; + summary->error = "Terminating: Maximum number of iterations reached."; summary->termination_type = NO_CONVERGENCE; + VLOG_IF(1, is_not_silent) << summary->error; return; } const double total_solver_time = iteration_start_time - start_time + summary->preprocessor_time_in_seconds; if (total_solver_time >= options_.max_solver_time_in_seconds) { - VLOG_IF(1, is_not_silent) - << "Terminating: Maximum solver time reached."; + summary->error = "Terminating: Maximum solver time reached."; summary->termination_type = NO_CONVERGENCE; + VLOG_IF(1, is_not_silent) << summary->error; return; } @@ -237,14 +236,16 @@ iteration_summary.step_is_valid = false; iteration_summary.step_is_successful = false; - double model_cost_change = 0.0; if (strategy_summary.termination_type == FATAL_ERROR) { - summary->error = "Terminating. Linear solver encountered a fatal error."; - LOG_IF(WARNING, is_not_silent) << summary->error; + summary->error = + "Terminating. Linear solver failed due to unrecoverable " + "non-numeric causes. Please see the error log for clues. "; summary->termination_type = NUMERICAL_FAILURE; + LOG_IF(WARNING, is_not_silent) << summary->error; return; } + double model_cost_change = 0.0; if (strategy_summary.termination_type != FAILURE) { // new_model_cost // = 1/2 [f + J * step]^2 @@ -278,8 +279,8 @@ "Terminating. 
Number of successive invalid steps more " "than Solver::Options::max_num_consecutive_invalid_steps: %d", options_.max_num_consecutive_invalid_steps); - LOG_IF(WARNING, is_not_silent) << summary->error; summary->termination_type = NUMERICAL_FAILURE; + LOG_IF(WARNING, is_not_silent) << summary->error; return; } @@ -366,13 +367,14 @@ const double step_size_tolerance = options_.parameter_tolerance * (x_norm + options_.parameter_tolerance); if (iteration_summary.step_norm <= step_size_tolerance) { - VLOG_IF(1, is_not_silent) - << "Terminating. Parameter tolerance reached. " - << "relative step_norm: " - << (iteration_summary.step_norm / - (x_norm + options_.parameter_tolerance)) - << " <= " << options_.parameter_tolerance; + summary->error = + StringPrintf("Terminating. Parameter tolerance reached. " + "relative step_norm: %e <= %e.", + (iteration_summary.step_norm / + (x_norm + options_.parameter_tolerance)), + options_.parameter_tolerance); summary->termination_type = PARAMETER_TOLERANCE; + VLOG_IF(1, is_not_silent) << summary->error; return; } @@ -380,11 +382,13 @@ const double absolute_function_tolerance = options_.function_tolerance * cost; if (fabs(iteration_summary.cost_change) < absolute_function_tolerance) { - VLOG_IF(1, is_not_silent) << "Terminating. Function tolerance reached. " - << "|cost_change|/cost: " - << fabs(iteration_summary.cost_change) / cost - << " <= " << options_.function_tolerance; + summary->error = + StringPrintf("Terminating. Function tolerance reached. 
" + "|cost_change|/cost: %e <= %e", + fabs(iteration_summary.cost_change) / cost, + options_.function_tolerance); summary->termination_type = FUNCTION_TOLERANCE; + VLOG_IF(1, is_not_silent) << summary->error; return; } @@ -483,8 +487,8 @@ jacobian)) { summary->error = "Terminating: Residual and Jacobian evaluation failed."; - LOG_IF(WARNING, is_not_silent) << summary->error; summary->termination_type = NUMERICAL_FAILURE; + LOG_IF(WARNING, is_not_silent) << summary->error; return; } @@ -492,12 +496,14 @@ iteration_summary.gradient_norm = gradient.norm(); if (iteration_summary.gradient_max_norm <= absolute_gradient_tolerance) { - VLOG_IF(1, is_not_silent) << "Terminating: Gradient tolerance reached." - << "Relative gradient max norm: " - << (iteration_summary.gradient_max_norm / - initial_gradient_max_norm) - << " <= " << options_.gradient_tolerance; + summary->error = + StringPrintf("Terminating: Gradient tolerance reached. " + "Relative gradient max norm: %e <= %e", + (iteration_summary.gradient_max_norm / + initial_gradient_max_norm), + options_.gradient_tolerance); summary->termination_type = GRADIENT_TOLERANCE; + VLOG_IF(1, is_not_silent) << summary->error; return; } @@ -560,9 +566,9 @@ iteration_summary.trust_region_radius = strategy->Radius(); if (iteration_summary.trust_region_radius < options_.min_trust_region_radius) { - VLOG_IF(1, is_not_silent) - << "Termination. Minimum trust region radius reached."; + summary->error = "Termination. Minimum trust region radius reached."; summary->termination_type = PARAMETER_TOLERANCE; + VLOG_IF(1, is_not_silent) << summary->error; return; }