Reduce verbosity of the inner iteration minimizer.
Add Minimizer::Options::is_silent which allows the user
to turn off the logging inside the minimizer completely.
In particular, this is used for silencing the logging
when inner iterations are used.
Add VLOG_IF to miniglog.
Change-Id: I4dc56e726eb012b4bbf750dc92adedba1a6d9c38
diff --git a/internal/ceres/coordinate_descent_minimizer.cc b/internal/ceres/coordinate_descent_minimizer.cc
index c4da987..bfe93c4 100644
--- a/internal/ceres/coordinate_descent_minimizer.cc
+++ b/internal/ceres/coordinate_descent_minimizer.cc
@@ -227,6 +227,7 @@
minimizer_options.evaluator = evaluator.get();
minimizer_options.jacobian = jacobian.get();
minimizer_options.trust_region_strategy = trust_region_strategy.get();
+ minimizer_options.is_silent = true;
TrustRegionMinimizer minimizer;
minimizer.Minimize(minimizer_options, parameter, summary);
diff --git a/internal/ceres/line_search_minimizer.cc b/internal/ceres/line_search_minimizer.cc
index 5533e20..6ee514a 100644
--- a/internal/ceres/line_search_minimizer.cc
+++ b/internal/ceres/line_search_minimizer.cc
@@ -93,6 +93,7 @@
void LineSearchMinimizer::Minimize(const Minimizer::Options& options,
double* parameters,
Solver::Summary* summary) {
+ const bool is_not_silent = !options.is_silent;
double start_time = WallTimeInSeconds();
double iteration_start_time = start_time;
@@ -124,7 +125,8 @@
// Do initial cost and Jacobian evaluation.
if (!Evaluate(evaluator, x, &current_state)) {
- LOG(WARNING) << "Terminating: Cost and gradient evaluation failed.";
+ LOG_IF(WARNING, is_not_silent)
+ << "Terminating: Cost and gradient evaluation failed.";
summary->termination_type = NUMERICAL_FAILURE;
return;
}
@@ -142,11 +144,12 @@
options.gradient_tolerance * initial_gradient_max_norm;
if (iteration_summary.gradient_max_norm <= absolute_gradient_tolerance) {
+ VLOG_IF(1, is_not_silent)
+ << "Terminating: Gradient tolerance reached."
+ << "Relative gradient max norm: "
+ << iteration_summary.gradient_max_norm / initial_gradient_max_norm
+ << " <= " << options.gradient_tolerance;
summary->termination_type = GRADIENT_TOLERANCE;
- VLOG(1) << "Terminating: Gradient tolerance reached."
- << "Relative gradient max norm: "
- << iteration_summary.gradient_max_norm / initial_gradient_max_norm
- << " <= " << options.gradient_tolerance;
return;
}
@@ -193,8 +196,9 @@
line_search_options,
&summary->error));
if (line_search.get() == NULL) {
- LOG(ERROR) << "Ceres bug: Unable to create a LineSearch object, please "
- << "contact the developers!, error: " << summary->error;
+ LOG_IF(ERROR, is_not_silent)
+ << "Ceres bug: Unable to create a LineSearch object, please "
+ << "contact the developers!, error: " << summary->error;
summary->termination_type = DID_NOT_RUN;
return;
}
@@ -209,16 +213,17 @@
iteration_start_time = WallTimeInSeconds();
if (iteration_summary.iteration >= options.max_num_iterations) {
+ VLOG_IF(1, is_not_silent)
+ << "Terminating: Maximum number of iterations reached.";
summary->termination_type = NO_CONVERGENCE;
- VLOG(1) << "Terminating: Maximum number of iterations reached.";
break;
}
const double total_solver_time = iteration_start_time - start_time +
summary->preprocessor_time_in_seconds;
if (total_solver_time >= options.max_solver_time_in_seconds) {
+ VLOG_IF(1, is_not_silent) << "Terminating: Maximum solver time reached.";
summary->termination_type = NO_CONVERGENCE;
- VLOG(1) << "Terminating: Maximum solver time reached.";
break;
}
@@ -247,10 +252,10 @@
StringPrintf("Line search direction failure: specified "
"max_num_line_search_direction_restarts: %d reached.",
options.max_num_line_search_direction_restarts);
- LOG(WARNING) << summary->error << " terminating optimization.";
+ LOG_IF(WARNING, is_not_silent) << summary->error
+ << " terminating optimization.";
summary->termination_type = NUMERICAL_FAILURE;
break;
-
} else if (!line_search_status) {
// Restart line search direction with gradient descent on first iteration
// as we have not yet reached our maximum number of restarts.
@@ -258,13 +263,16 @@
options.max_num_line_search_direction_restarts);
++num_line_search_direction_restarts;
- LOG(WARNING)
+ LOG_IF(WARNING, is_not_silent)
<< "Line search direction algorithm: "
- << LineSearchDirectionTypeToString(options.line_search_direction_type)
- << ", failed to produce a valid new direction at iteration: "
- << iteration_summary.iteration << ". Restarting, number of "
- << "restarts: " << num_line_search_direction_restarts << " / "
- << options.max_num_line_search_direction_restarts << " [max].";
+ << LineSearchDirectionTypeToString(
+ options.line_search_direction_type)
+ << ", failed to produce a valid new direction at "
+ << "iteration: " << iteration_summary.iteration
+ << ". Restarting, number of restarts: "
+ << num_line_search_direction_restarts << " / "
+ << options.max_num_line_search_direction_restarts
+ << " [max].";
line_search_direction.reset(
LineSearchDirection::Create(line_search_direction_options));
current_state.search_direction = -current_state.gradient;
@@ -295,7 +303,7 @@
"(current_cost - previous_cost): %.5e",
initial_step_size, current_state.directional_derivative,
(current_state.cost - previous_state.cost));
- LOG(WARNING) << summary->error;
+ LOG_IF(WARNING, is_not_silent) << summary->error;
summary->termination_type = NUMERICAL_FAILURE;
break;
}
@@ -314,20 +322,23 @@
// TODO(sameeragarwal): Collect stats.
if (!evaluator->Plus(x.data(), delta.data(), x_plus_delta.data())) {
- LOG(WARNING) << "x_plus_delta = Plus(x, delta) failed. ";
+ LOG_IF(WARNING, is_not_silent)
+ << "x_plus_delta = Plus(x, delta) failed. ";
} else if (!Evaluate(evaluator, x_plus_delta, &current_state)) {
- LOG(WARNING) << "Step failed to evaluate. ";
+ LOG_IF(WARNING, is_not_silent) << "Step failed to evaluate. ";
} else {
x = x_plus_delta;
}
iteration_summary.gradient_max_norm = current_state.gradient_max_norm;
if (iteration_summary.gradient_max_norm <= absolute_gradient_tolerance) {
+ VLOG_IF(1, is_not_silent)
+ << "Terminating: Gradient tolerance reached."
+ << "Relative gradient max norm: "
+ << (iteration_summary.gradient_max_norm /
+ initial_gradient_max_norm)
+ << " <= " << options.gradient_tolerance;
summary->termination_type = GRADIENT_TOLERANCE;
- VLOG(1) << "Terminating: Gradient tolerance reached."
- << "Relative gradient max norm: "
- << iteration_summary.gradient_max_norm / initial_gradient_max_norm
- << " <= " << options.gradient_tolerance;
break;
}
@@ -335,10 +346,11 @@
const double absolute_function_tolerance =
options.function_tolerance * previous_state.cost;
if (fabs(iteration_summary.cost_change) < absolute_function_tolerance) {
- VLOG(1) << "Terminating. Function tolerance reached. "
- << "|cost_change|/cost: "
- << fabs(iteration_summary.cost_change) / previous_state.cost
- << " <= " << options.function_tolerance;
+ VLOG_IF(1, is_not_silent)
+ << "Terminating. Function tolerance reached. "
+ << "|cost_change|/cost: "
+ << fabs(iteration_summary.cost_change) / previous_state.cost
+ << " <= " << options.function_tolerance;
summary->termination_type = FUNCTION_TOLERANCE;
return;
}
diff --git a/internal/ceres/miniglog/glog/logging.h b/internal/ceres/miniglog/glog/logging.h
index ff131e5..9156d77 100644
--- a/internal/ceres/miniglog/glog/logging.h
+++ b/internal/ceres/miniglog/glog/logging.h
@@ -294,10 +294,12 @@
# define LOG(n) LOG_IF(n, n <= MAX_LOG_LEVEL)
# define VLOG(n) LOG_IF(n, n <= MAX_LOG_LEVEL)
# define LG LOG_IF(INFO, INFO <= MAX_LOG_LEVEL)
+# define VLOG_IF(n, condition) LOG_IF(n, (n <= MAX_LOG_LEVEL) && condition)
#else
# define LOG(n) MessageLogger((char *)__FILE__, __LINE__, "native", n).stream() // NOLINT
# define VLOG(n) MessageLogger((char *)__FILE__, __LINE__, "native", n).stream() // NOLINT
# define LG MessageLogger((char *)__FILE__, __LINE__, "native", INFO).stream() // NOLINT
+# define VLOG_IF(n, condition) LOG_IF(n, condition)
#endif
// Currently, VLOG is always on for levels below MAX_LOG_LEVEL.
diff --git a/internal/ceres/minimizer.h b/internal/ceres/minimizer.h
index 622e9ce..3d9da99 100644
--- a/internal/ceres/minimizer.h
+++ b/internal/ceres/minimizer.h
@@ -107,6 +107,7 @@
options.line_search_sufficient_curvature_decrease;
max_line_search_step_expansion =
options.max_line_search_step_expansion;
+ is_silent = false;
evaluator = NULL;
trust_region_strategy = NULL;
jacobian = NULL;
@@ -153,6 +154,8 @@
double line_search_sufficient_curvature_decrease;
double max_line_search_step_expansion;
+ // If true, then all logging is disabled.
+ bool is_silent;
// List of callbacks that are executed by the Minimizer at the end
// of each iteration.
diff --git a/internal/ceres/trust_region_minimizer.cc b/internal/ceres/trust_region_minimizer.cc
index 8e2bc67..81dc3e1 100644
--- a/internal/ceres/trust_region_minimizer.cc
+++ b/internal/ceres/trust_region_minimizer.cc
@@ -81,6 +81,7 @@
double start_time = WallTimeInSeconds();
double iteration_start_time = start_time;
Init(options);
+ const bool is_not_silent = !options.is_silent;
summary->termination_type = NO_CONVERGENCE;
summary->num_successful_steps = 0;
@@ -128,7 +129,8 @@
residuals.data(),
gradient.data(),
jacobian)) {
- LOG(WARNING) << "Terminating: Residual and Jacobian evaluation failed.";
+ LOG_IF(WARNING, is_not_silent)
+ << "Terminating: Residual and Jacobian evaluation failed.";
summary->termination_type = NUMERICAL_FAILURE;
return;
}
@@ -152,11 +154,12 @@
options_.gradient_tolerance * initial_gradient_max_norm;
if (iteration_summary.gradient_max_norm <= absolute_gradient_tolerance) {
+ VLOG_IF(1, is_not_silent) << "Terminating: Gradient tolerance reached."
+ << "Relative gradient max norm: "
+ << (iteration_summary.gradient_max_norm /
+ initial_gradient_max_norm)
+ << " <= " << options_.gradient_tolerance;
summary->termination_type = GRADIENT_TOLERANCE;
- VLOG(1) << "Terminating: Gradient tolerance reached."
- << "Relative gradient max norm: "
- << iteration_summary.gradient_max_norm / initial_gradient_max_norm
- << " <= " << options_.gradient_tolerance;
return;
}
@@ -184,17 +187,19 @@
iteration_start_time = WallTimeInSeconds();
if (iteration_summary.iteration >= options_.max_num_iterations) {
+ VLOG_IF(1, is_not_silent)
+ << "Terminating: Maximum number of iterations reached.";
summary->termination_type = NO_CONVERGENCE;
- VLOG(1) << "Terminating: Maximum number of iterations reached.";
- break;
+ return;
}
const double total_solver_time = iteration_start_time - start_time +
summary->preprocessor_time_in_seconds;
if (total_solver_time >= options_.max_solver_time_in_seconds) {
+ VLOG_IF(1, is_not_silent)
+ << "Terminating: Maximum solver time reached.";
summary->termination_type = NO_CONVERGENCE;
- VLOG(1) << "Terminating: Maximum solver time reached.";
- break;
+ return;
}
const double strategy_start_time = WallTimeInSeconds();
@@ -245,9 +250,10 @@
- model_residuals.dot(residuals + model_residuals / 2.0);
if (model_cost_change < 0.0) {
- VLOG(1) << "Invalid step: current_cost: " << cost
- << " absolute difference " << model_cost_change
- << " relative difference " << (model_cost_change / cost);
+ VLOG_IF(1, is_not_silent)
+ << "Invalid step: current_cost: " << cost
+ << " absolute difference " << model_cost_change
+ << " relative difference " << (model_cost_change / cost);
} else {
iteration_summary.step_is_valid = true;
}
@@ -259,13 +265,12 @@
// NUMERICAL_FAILURE if this limit is exceeded.
if (++num_consecutive_invalid_steps >=
options_.max_num_consecutive_invalid_steps) {
- summary->termination_type = NUMERICAL_FAILURE;
summary->error = StringPrintf(
"Terminating. Number of successive invalid steps more "
"than Solver::Options::max_num_consecutive_invalid_steps: %d",
options_.max_num_consecutive_invalid_steps);
-
- LOG(WARNING) << summary->error;
+ LOG_IF(WARNING, is_not_silent) << summary->error;
+ summary->termination_type = NUMERICAL_FAILURE;
return;
}
@@ -314,30 +319,30 @@
if (!evaluator->Evaluate(inner_iteration_x.data(),
&new_cost,
NULL, NULL, NULL)) {
- VLOG(2) << "Inner iteration failed.";
+ VLOG_IF(2, is_not_silent) << "Inner iteration failed.";
new_cost = x_plus_delta_cost;
} else {
x_plus_delta = inner_iteration_x;
// Boost the model_cost_change, since the inner iteration
// improvements are not accounted for by the trust region.
model_cost_change += x_plus_delta_cost - new_cost;
- VLOG(2) << "Inner iteration succeeded; current cost: " << cost
- << " x_plus_delta_cost: " << x_plus_delta_cost
- << " new_cost: " << new_cost;
- const double inner_iteration_relative_progress =
- 1.0 - new_cost / x_plus_delta_cost;
- inner_iterations_are_enabled =
- (inner_iteration_relative_progress >
- options.inner_iteration_tolerance);
+ VLOG_IF(2, is_not_silent)
+ << "Inner iteration succeeded; Current cost: " << cost
+ << " Trust region step cost: " << x_plus_delta_cost
+ << " Inner iteration cost: " << new_cost;
inner_iterations_were_useful = new_cost < cost;
+ const double inner_iteration_relative_progress =
+ 1.0 - new_cost / x_plus_delta_cost;
// Disable inner iterations once the relative improvement
// drops below tolerance.
- if (!inner_iterations_are_enabled) {
- VLOG(2) << "Disabling inner iterations. Progress : "
- << inner_iteration_relative_progress;
- }
+ inner_iterations_are_enabled =
+ (inner_iteration_relative_progress >
+ options.inner_iteration_tolerance);
+ VLOG_IF(2, is_not_silent && !inner_iterations_are_enabled)
+ << "Disabling inner iterations. Progress : "
+ << inner_iteration_relative_progress;
}
summary->inner_iteration_time_in_seconds +=
WallTimeInSeconds() - inner_iteration_start_time;
@@ -350,11 +355,12 @@
const double step_size_tolerance = options_.parameter_tolerance *
(x_norm + options_.parameter_tolerance);
if (iteration_summary.step_norm <= step_size_tolerance) {
- VLOG(1) << "Terminating. Parameter tolerance reached. "
- << "relative step_norm: "
- << iteration_summary.step_norm /
- (x_norm + options_.parameter_tolerance)
- << " <= " << options_.parameter_tolerance;
+ VLOG_IF(1, is_not_silent)
+ << "Terminating. Parameter tolerance reached. "
+ << "relative step_norm: "
+ << (iteration_summary.step_norm /
+ (x_norm + options_.parameter_tolerance))
+ << " <= " << options_.parameter_tolerance;
summary->termination_type = PARAMETER_TOLERANCE;
return;
}
@@ -363,10 +369,10 @@
const double absolute_function_tolerance =
options_.function_tolerance * cost;
if (fabs(iteration_summary.cost_change) < absolute_function_tolerance) {
- VLOG(1) << "Terminating. Function tolerance reached. "
- << "|cost_change|/cost: "
- << fabs(iteration_summary.cost_change) / cost
- << " <= " << options_.function_tolerance;
+ VLOG_IF(1, is_not_silent) << "Terminating. Function tolerance reached. "
+ << "|cost_change|/cost: "
+ << fabs(iteration_summary.cost_change) / cost
+ << " <= " << options_.function_tolerance;
summary->termination_type = FUNCTION_TOLERANCE;
return;
}
@@ -441,10 +447,12 @@
if (!inner_iterations_were_useful &&
relative_decrease <= options_.min_relative_decrease) {
iteration_summary.step_is_nonmonotonic = true;
- VLOG(2) << "Non-monotonic step! "
- << " relative_decrease: " << relative_decrease
- << " historical_relative_decrease: "
- << historical_relative_decrease;
+ VLOG_IF(2, is_not_silent)
+ << "Non-monotonic step! "
+ << " relative_decrease: "
+ << relative_decrease
+ << " historical_relative_decrease: "
+ << historical_relative_decrease;
}
}
}
@@ -462,22 +470,22 @@
residuals.data(),
gradient.data(),
jacobian)) {
- summary->termination_type = NUMERICAL_FAILURE;
summary->error =
"Terminating: Residual and Jacobian evaluation failed.";
- LOG(WARNING) << summary->error;
+ LOG_IF(WARNING, is_not_silent) << summary->error;
+ summary->termination_type = NUMERICAL_FAILURE;
return;
}
iteration_summary.gradient_max_norm = gradient.lpNorm<Eigen::Infinity>();
if (iteration_summary.gradient_max_norm <= absolute_gradient_tolerance) {
+ VLOG_IF(1, is_not_silent) << "Terminating: Gradient tolerance reached."
+ << "Relative gradient max norm: "
+ << (iteration_summary.gradient_max_norm /
+ initial_gradient_max_norm)
+ << " <= " << options_.gradient_tolerance;
summary->termination_type = GRADIENT_TOLERANCE;
- VLOG(1) << "Terminating: Gradient tolerance reached."
- << "Relative gradient max norm: "
- << (iteration_summary.gradient_max_norm /
- initial_gradient_max_norm)
- << " <= " << options_.gradient_tolerance;
return;
}
@@ -505,7 +513,8 @@
if (cost > candidate_cost) {
// The current iterate is has a higher cost than the
// candidate iterate. Set the candidate to this point.
- VLOG(2) << "Updating the candidate iterate to the current point.";
+ VLOG_IF(2, is_not_silent)
+ << "Updating the candidate iterate to the current point.";
candidate_cost = cost;
accumulated_candidate_model_cost_change = 0.0;
}
@@ -519,7 +528,8 @@
// iterate.
if (num_consecutive_nonmonotonic_steps ==
options.max_consecutive_nonmonotonic_steps) {
- VLOG(2) << "Resetting the reference point to the candidate point";
+ VLOG_IF(2, is_not_silent)
+ << "Resetting the reference point to the candidate point";
reference_cost = candidate_cost;
accumulated_reference_model_cost_change =
accumulated_candidate_model_cost_change;
@@ -538,8 +548,9 @@
iteration_summary.trust_region_radius = strategy->Radius();
if (iteration_summary.trust_region_radius <
options_.min_trust_region_radius) {
+ VLOG_IF(1, is_not_silent)
+ << "Termination. Minimum trust region radius reached.";
summary->termination_type = PARAMETER_TOLERANCE;
- VLOG(1) << "Termination. Minimum trust region radius reached.";
return;
}