Add IterationSummary::gradient_norm.

The iteration summary now also reports the 2-norm of the gradient,
in addition to the existing infinity norm.

Change-Id: I1ed7f1456ee4f546c9b42423d7a4ec3079ec078f
diff --git a/docs/source/solving.rst b/docs/source/solving.rst
index f17c695..1ab3dba 100644
--- a/docs/source/solving.rst
+++ b/docs/source/solving.rst
@@ -1588,6 +1588,9 @@
   // Infinity norm of the gradient vector.
   double gradient_max_norm;

+  // 2-norm of the gradient vector.
+  double gradient_norm;
+
   // 2-norm of the size of the step computed by the optimization
   // algorithm.
   double step_norm;
diff --git a/include/ceres/iteration_callback.h b/include/ceres/iteration_callback.h
index 987c2d9..5689256 100644
--- a/include/ceres/iteration_callback.h
+++ b/include/ceres/iteration_callback.h
@@ -50,6 +50,7 @@
         cost(0.0),
         cost_change(0.0),
         gradient_max_norm(0.0),
+        gradient_norm(0.0),
         step_norm(0.0),
         eta(0.0),
         step_size(0.0),
@@ -100,6 +101,9 @@
   // Infinity norm of the gradient vector.
   double gradient_max_norm;

+  // 2-norm of the gradient vector.
+  double gradient_norm;
+
   // 2-norm of the size of the step computed by the optimization
   // algorithm.
   double step_norm;
diff --git a/internal/ceres/line_search_minimizer.cc b/internal/ceres/line_search_minimizer.cc
index 6ee514a..b7e96c8 100644
--- a/internal/ceres/line_search_minimizer.cc
+++ b/internal/ceres/line_search_minimizer.cc
@@ -119,6 +119,7 @@
   iteration_summary.step_is_successful = false;
   iteration_summary.cost_change = 0.0;
   iteration_summary.gradient_max_norm = 0.0;
+  iteration_summary.gradient_norm = 0.0;
   iteration_summary.step_norm = 0.0;
   iteration_summary.linear_solver_iterations = 0;
   iteration_summary.step_solver_time_in_seconds = 0;
@@ -135,6 +136,7 @@
   iteration_summary.cost = current_state.cost + summary->fixed_cost;
   iteration_summary.gradient_max_norm = current_state.gradient_max_norm;
+  iteration_summary.gradient_norm = sqrt(current_state.gradient_squared_norm);

   // The initial gradient max_norm is bounded from below so that we do
   // not divide by zero.
@@ -331,6 +333,8 @@
    }

    iteration_summary.gradient_max_norm = current_state.gradient_max_norm;
+   iteration_summary.gradient_norm = sqrt(current_state.gradient_squared_norm);
+
    if (iteration_summary.gradient_max_norm <= absolute_gradient_tolerance) {
      VLOG_IF(1, is_not_silent)
          << "Terminating: Gradient tolerance reached."
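Note that the two minimizers populate the same field along different paths: the line search minimizer already caches the squared 2-norm of the gradient, so it takes a square root, while the trust region minimizer (below) calls Eigen's norm() on the gradient vector directly. A minimal sketch of the equivalence, using Eigen; the helper names are hypothetical and not part of the patch:

```cpp
#include <cmath>
#include "Eigen/Core"

// Both expressions yield the same quantity: the 2-norm ||g||_2.
double FromCachedSquaredNorm(double gradient_squared_norm) {
  // Path taken by the line search minimizer, which caches the squared norm.
  return std::sqrt(gradient_squared_norm);
}

double FromVector(const Eigen::VectorXd& gradient) {
  // Path taken by the trust region minimizer, which has the vector at hand.
  return gradient.norm();
}
```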
diff --git a/internal/ceres/trust_region_minimizer.cc b/internal/ceres/trust_region_minimizer.cc
index 81dc3e1..ea7ee74 100644
--- a/internal/ceres/trust_region_minimizer.cc
+++ b/internal/ceres/trust_region_minimizer.cc
@@ -113,6 +113,7 @@
   iteration_summary.step_is_successful = false;
   iteration_summary.cost_change = 0.0;
   iteration_summary.gradient_max_norm = 0.0;
+  iteration_summary.gradient_norm = 0.0;
   iteration_summary.step_norm = 0.0;
   iteration_summary.relative_decrease = 0.0;
   iteration_summary.trust_region_radius = strategy->Radius();
@@ -145,6 +146,7 @@
   summary->initial_cost = cost + summary->fixed_cost;
   iteration_summary.cost = cost + summary->fixed_cost;
   iteration_summary.gradient_max_norm = gradient.lpNorm<Eigen::Infinity>();
+  iteration_summary.gradient_norm = gradient.norm();

   // The initial gradient max_norm is bounded from below so that we do
   // not divide by zero.
@@ -283,6 +285,8 @@
      iteration_summary.cost_change = 0.0;
      iteration_summary.gradient_max_norm =
          summary->iterations.back().gradient_max_norm;
+     iteration_summary.gradient_norm =
+         summary->iterations.back().gradient_norm;
      iteration_summary.step_norm = 0.0;
      iteration_summary.relative_decrease = 0.0;
      iteration_summary.eta = options_.eta;
@@ -478,6 +482,7 @@
    }

    iteration_summary.gradient_max_norm = gradient.lpNorm<Eigen::Infinity>();
+   iteration_summary.gradient_norm = gradient.norm();
    if (iteration_summary.gradient_max_norm <= absolute_gradient_tolerance) {
      VLOG_IF(1, is_not_silent)
          << "Terminating: Gradient tolerance reached."
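For reference, a minimal sketch of how a user could observe the new field through the public API. This assumes only the standard ceres::IterationCallback interface; GradientNormLoggingCallback is a hypothetical name, not part of the patch:

```cpp
#include <cstdio>
#include "ceres/iteration_callback.h"
#include "ceres/solver.h"

// Logs the new 2-norm next to the existing infinity norm each iteration.
class GradientNormLoggingCallback : public ceres::IterationCallback {
 public:
  virtual ceres::CallbackReturnType operator()(
      const ceres::IterationSummary& summary) {
    std::printf("iter %3d  |g|_2 = %.6e  |g|_inf = %.6e\n",
                summary.iteration,
                summary.gradient_norm,
                summary.gradient_max_norm);
    return ceres::SOLVER_CONTINUE;
  }
};

// Usage: register the callback on the solver options before calling Solve().
//   GradientNormLoggingCallback callback;
//   ceres::Solver::Options options;
//   options.callbacks.push_back(&callback);
//   ceres::Solve(options, &problem, &summary);
```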