Cleanup example code
Remove "using ceres::foo" directives from example code. The using
directives actually make the code harder to read unless you already
know the ceres API. By making the namespace explicit it is clear
to the reader that these are functions and objects from the Ceres
API.
Change-Id: I89b1281c754bf71c0f82e39e1607c5e40a148388
diff --git a/examples/circle_fit.cc b/examples/circle_fit.cc
index f2450f4..2d398c4 100644
--- a/examples/circle_fit.cc
+++ b/examples/circle_fit.cc
@@ -57,14 +57,6 @@
#include "gflags/gflags.h"
#include "glog/logging.h"
-using ceres::AutoDiffCostFunction;
-using ceres::CauchyLoss;
-using ceres::CostFunction;
-using ceres::LossFunction;
-using ceres::Problem;
-using ceres::Solve;
-using ceres::Solver;
-
DEFINE_double(robust_threshold,
0.0,
"Robust loss parameter. Set to 0 for normal squared error (no "
@@ -128,20 +120,20 @@
// Parameterize r as m^2 so that it can't be negative.
double m = sqrt(r);
- Problem problem;
+ ceres::Problem problem;
// Configure the loss function.
- LossFunction* loss = nullptr;
+ ceres::LossFunction* loss = nullptr;
if (CERES_GET_FLAG(FLAGS_robust_threshold)) {
- loss = new CauchyLoss(CERES_GET_FLAG(FLAGS_robust_threshold));
+ loss = new ceres::CauchyLoss(CERES_GET_FLAG(FLAGS_robust_threshold));
}
// Add the residuals.
double xx, yy;
int num_points = 0;
while (scanf("%lf %lf\n", &xx, &yy) == 2) {
- CostFunction* cost =
- new AutoDiffCostFunction<DistanceFromCircleCost, 1, 1, 1, 1>(
+ ceres::CostFunction* cost =
+ new ceres::AutoDiffCostFunction<DistanceFromCircleCost, 1, 1, 1, 1>(
new DistanceFromCircleCost(xx, yy));
problem.AddResidualBlock(cost, loss, &x, &y, &m);
num_points++;
@@ -150,11 +142,11 @@
std::cout << "Got " << num_points << " points.\n";
// Build and solve the problem.
- Solver::Options options;
+ ceres::Solver::Options options;
options.max_num_iterations = 500;
options.linear_solver_type = ceres::DENSE_QR;
- Solver::Summary summary;
- Solve(options, &problem, &summary);
+ ceres::Solver::Summary summary;
+ ceres::Solve(options, &problem, &summary);
// Recover r from m.
r = m * m;
diff --git a/examples/curve_fitting.cc b/examples/curve_fitting.cc
index e151827..8cbf3aa 100644
--- a/examples/curve_fitting.cc
+++ b/examples/curve_fitting.cc
@@ -31,12 +31,6 @@
#include "ceres/ceres.h"
#include "glog/logging.h"
-using ceres::AutoDiffCostFunction;
-using ceres::CostFunction;
-using ceres::Problem;
-using ceres::Solve;
-using ceres::Solver;
-
// Data generated using the following octave code.
// randn('seed', 23497);
// m = 0.3;
@@ -137,28 +131,30 @@
int main(int argc, char** argv) {
google::InitGoogleLogging(argv[0]);
- double m = 0.0;
- double c = 0.0;
+ const double initial_m = 0.0;
+ const double initial_c = 0.0;
+ double m = initial_m;
+ double c = initial_c;
- Problem problem;
+ ceres::Problem problem;
for (int i = 0; i < kNumObservations; ++i) {
problem.AddResidualBlock(
- new AutoDiffCostFunction<ExponentialResidual, 1, 1, 1>(
+ new ceres::AutoDiffCostFunction<ExponentialResidual, 1, 1, 1>(
new ExponentialResidual(data[2 * i], data[2 * i + 1])),
nullptr,
&m,
&c);
}
- Solver::Options options;
+ ceres::Solver::Options options;
options.max_num_iterations = 25;
options.linear_solver_type = ceres::DENSE_QR;
options.minimizer_progress_to_stdout = true;
- Solver::Summary summary;
- Solve(options, &problem, &summary);
+ ceres::Solver::Summary summary;
+ ceres::Solve(options, &problem, &summary);
std::cout << summary.BriefReport() << "\n";
- std::cout << "Initial m: " << 0.0 << " c: " << 0.0 << "\n";
+ std::cout << "Initial m: " << initial_m << " c: " << initial_c << "\n";
std::cout << "Final m: " << m << " c: " << c << "\n";
return 0;
}
diff --git a/examples/helloworld.cc b/examples/helloworld.cc
index 190842f..c9972fd 100644
--- a/examples/helloworld.cc
+++ b/examples/helloworld.cc
@@ -36,12 +36,6 @@
#include "ceres/ceres.h"
#include "glog/logging.h"
-using ceres::AutoDiffCostFunction;
-using ceres::CostFunction;
-using ceres::Problem;
-using ceres::Solve;
-using ceres::Solver;
-
// A templated cost functor that implements the residual r = 10 -
// x. The method operator() is templated so that we can then use an
// automatic differentiation wrapper around it to generate its
@@ -63,19 +57,19 @@
const double initial_x = x;
// Build the problem.
- Problem problem;
+ ceres::Problem problem;
// Set up the only cost function (also known as residual). This uses
// auto-differentiation to obtain the derivative (jacobian).
- CostFunction* cost_function =
- new AutoDiffCostFunction<CostFunctor, 1, 1>(new CostFunctor);
+ ceres::CostFunction* cost_function =
+ new ceres::AutoDiffCostFunction<CostFunctor, 1, 1>(new CostFunctor);
problem.AddResidualBlock(cost_function, nullptr, &x);
// Run the solver!
- Solver::Options options;
+ ceres::Solver::Options options;
options.minimizer_progress_to_stdout = true;
- Solver::Summary summary;
- Solve(options, &problem, &summary);
+ ceres::Solver::Summary summary;
+ ceres::Solve(options, &problem, &summary);
std::cout << summary.BriefReport() << "\n";
std::cout << "x : " << initial_x << " -> " << x << "\n";
diff --git a/examples/helloworld_analytic_diff.cc b/examples/helloworld_analytic_diff.cc
index fa4b74d..b4826a2 100644
--- a/examples/helloworld_analytic_diff.cc
+++ b/examples/helloworld_analytic_diff.cc
@@ -37,17 +37,11 @@
#include "ceres/ceres.h"
#include "glog/logging.h"
-using ceres::CostFunction;
-using ceres::Problem;
-using ceres::SizedCostFunction;
-using ceres::Solve;
-using ceres::Solver;
-
// A CostFunction implementing analytically derivatives for the
// function f(x) = 10 - x.
class QuadraticCostFunction
- : public SizedCostFunction<1 /* number of residuals */,
- 1 /* size of first parameter */> {
+ : public ceres::SizedCostFunction<1 /* number of residuals */,
+ 1 /* size of first parameter */> {
public:
bool Evaluate(double const* const* parameters,
double* residuals,
@@ -86,17 +80,17 @@
const double initial_x = x;
// Build the problem.
- Problem problem;
+ ceres::Problem problem;
// Set up the only cost function (also known as residual).
- CostFunction* cost_function = new QuadraticCostFunction;
+ ceres::CostFunction* cost_function = new QuadraticCostFunction;
problem.AddResidualBlock(cost_function, nullptr, &x);
// Run the solver!
- Solver::Options options;
+ ceres::Solver::Options options;
options.minimizer_progress_to_stdout = true;
- Solver::Summary summary;
- Solve(options, &problem, &summary);
+ ceres::Solver::Summary summary;
+ ceres::Solve(options, &problem, &summary);
std::cout << summary.BriefReport() << "\n";
std::cout << "x : " << initial_x << " -> " << x << "\n";
diff --git a/examples/helloworld_numeric_diff.cc b/examples/helloworld_numeric_diff.cc
index f7c1bbf..4ed9ca6 100644
--- a/examples/helloworld_numeric_diff.cc
+++ b/examples/helloworld_numeric_diff.cc
@@ -34,13 +34,6 @@
#include "ceres/ceres.h"
#include "glog/logging.h"
-using ceres::CENTRAL;
-using ceres::CostFunction;
-using ceres::NumericDiffCostFunction;
-using ceres::Problem;
-using ceres::Solve;
-using ceres::Solver;
-
// A cost functor that implements the residual r = 10 - x.
struct CostFunctor {
bool operator()(const double* const x, double* residual) const {
@@ -58,19 +51,20 @@
const double initial_x = x;
// Build the problem.
- Problem problem;
+ ceres::Problem problem;
// Set up the only cost function (also known as residual). This uses
// numeric differentiation to obtain the derivative (jacobian).
- CostFunction* cost_function =
- new NumericDiffCostFunction<CostFunctor, CENTRAL, 1, 1>(new CostFunctor);
+ ceres::CostFunction* cost_function =
+ new ceres::NumericDiffCostFunction<CostFunctor, ceres::CENTRAL, 1, 1>(
+ new CostFunctor);
problem.AddResidualBlock(cost_function, nullptr, &x);
// Run the solver!
- Solver::Options options;
+ ceres::Solver::Options options;
options.minimizer_progress_to_stdout = true;
- Solver::Summary summary;
- Solve(options, &problem, &summary);
+ ceres::Solver::Summary summary;
+ ceres::Solve(options, &problem, &summary);
std::cout << summary.BriefReport() << "\n";
std::cout << "x : " << initial_x << " -> " << x << "\n";
diff --git a/examples/more_garbow_hillstrom.cc b/examples/more_garbow_hillstrom.cc
index 0da97f9..35936f4 100644
--- a/examples/more_garbow_hillstrom.cc
+++ b/examples/more_garbow_hillstrom.cc
@@ -585,7 +585,6 @@
int main(int argc, char** argv) {
GFLAGS_NAMESPACE::ParseCommandLineFlags(&argc, &argv, true);
google::InitGoogleLogging(argv[0]);
-
using ceres::examples::Solve;
int unconstrained_problems = 0;
diff --git a/examples/powell.cc b/examples/powell.cc
index caee956..80de423 100644
--- a/examples/powell.cc
+++ b/examples/powell.cc
@@ -50,12 +50,6 @@
#include "gflags/gflags.h"
#include "glog/logging.h"
-using ceres::AutoDiffCostFunction;
-using ceres::CostFunction;
-using ceres::Problem;
-using ceres::Solve;
-using ceres::Solver;
-
struct F1 {
template <typename T>
bool operator()(const T* const x1, const T* const x2, T* residual) const {
@@ -105,20 +99,20 @@
double x3 = 0.0;
double x4 = 1.0;
- Problem problem;
+ ceres::Problem problem;
// Add residual terms to the problem using the autodiff
// wrapper to get the derivatives automatically. The parameters, x1 through
// x4, are modified in place.
problem.AddResidualBlock(
- new AutoDiffCostFunction<F1, 1, 1, 1>(new F1), nullptr, &x1, &x2);
+ new ceres::AutoDiffCostFunction<F1, 1, 1, 1>(new F1), nullptr, &x1, &x2);
problem.AddResidualBlock(
- new AutoDiffCostFunction<F2, 1, 1, 1>(new F2), nullptr, &x3, &x4);
+ new ceres::AutoDiffCostFunction<F2, 1, 1, 1>(new F2), nullptr, &x3, &x4);
problem.AddResidualBlock(
- new AutoDiffCostFunction<F3, 1, 1, 1>(new F3), nullptr, &x2, &x3);
+ new ceres::AutoDiffCostFunction<F3, 1, 1, 1>(new F3), nullptr, &x2, &x3);
problem.AddResidualBlock(
- new AutoDiffCostFunction<F4, 1, 1, 1>(new F4), nullptr, &x1, &x4);
+ new ceres::AutoDiffCostFunction<F4, 1, 1, 1>(new F4), nullptr, &x1, &x4);
- Solver::Options options;
+ ceres::Solver::Options options;
LOG_IF(FATAL,
!ceres::StringToMinimizerType(CERES_GET_FLAG(FLAGS_minimizer),
&options.minimizer_type))
@@ -138,8 +132,8 @@
// clang-format on
// Run the solver!
- Solver::Summary summary;
- Solve(options, &problem, &summary);
+ ceres::Solver::Summary summary;
+ ceres::Solve(options, &problem, &summary);
std::cout << summary.FullReport() << "\n";
// clang-format off
diff --git a/examples/robot_pose_mle.cc b/examples/robot_pose_mle.cc
index cbba3fc..cc60e14 100644
--- a/examples/robot_pose_mle.cc
+++ b/examples/robot_pose_mle.cc
@@ -136,15 +136,6 @@
#include "gflags/gflags.h"
#include "glog/logging.h"
-using ceres::AutoDiffCostFunction;
-using ceres::CauchyLoss;
-using ceres::CostFunction;
-using ceres::DynamicAutoDiffCostFunction;
-using ceres::LossFunction;
-using ceres::Problem;
-using ceres::Solve;
-using ceres::Solver;
-
DEFINE_double(corridor_length,
30.0,
"Length of the corridor that the robot is travelling down.");
@@ -166,7 +157,8 @@
static constexpr int kStride = 10;
struct OdometryConstraint {
- using OdometryCostFunction = AutoDiffCostFunction<OdometryConstraint, 1, 1>;
+ using OdometryCostFunction =
+ ceres::AutoDiffCostFunction<OdometryConstraint, 1, 1>;
OdometryConstraint(double odometry_mean, double odometry_stddev)
: odometry_mean(odometry_mean), odometry_stddev(odometry_stddev) {}
@@ -188,7 +180,7 @@
struct RangeConstraint {
using RangeCostFunction =
- DynamicAutoDiffCostFunction<RangeConstraint, kStride>;
+ ceres::DynamicAutoDiffCostFunction<RangeConstraint, kStride>;
RangeConstraint(int pose_index,
double range_reading,
@@ -327,9 +319,9 @@
ceres::Solver::Options solver_options;
solver_options.minimizer_progress_to_stdout = true;
- Solver::Summary summary;
+ ceres::Solver::Summary summary;
printf("Solving...\n");
- Solve(solver_options, &problem, &summary);
+ ceres::Solve(solver_options, &problem, &summary);
printf("Done.\n");
std::cout << summary.FullReport() << "\n";
printf("Final values:\n");
diff --git a/examples/robust_curve_fitting.cc b/examples/robust_curve_fitting.cc
index 46fa522..915b619 100644
--- a/examples/robust_curve_fitting.cc
+++ b/examples/robust_curve_fitting.cc
@@ -115,13 +115,6 @@
};
// clang-format on
-using ceres::AutoDiffCostFunction;
-using ceres::CauchyLoss;
-using ceres::CostFunction;
-using ceres::Problem;
-using ceres::Solve;
-using ceres::Solver;
-
struct ExponentialResidual {
ExponentialResidual(double x, double y) : x_(x), y_(y) {}
@@ -139,25 +132,28 @@
int main(int argc, char** argv) {
google::InitGoogleLogging(argv[0]);
- double m = 0.0;
- double c = 0.0;
+ const double initial_m = 0.0;
+ const double initial_c = 0.0;
+ double m = initial_m;
+ double c = initial_c;
- Problem problem;
+ ceres::Problem problem;
for (int i = 0; i < kNumObservations; ++i) {
- CostFunction* cost_function =
- new AutoDiffCostFunction<ExponentialResidual, 1, 1, 1>(
+ ceres::CostFunction* cost_function =
+ new ceres::AutoDiffCostFunction<ExponentialResidual, 1, 1, 1>(
new ExponentialResidual(data[2 * i], data[2 * i + 1]));
- problem.AddResidualBlock(cost_function, new CauchyLoss(0.5), &m, &c);
+ problem.AddResidualBlock(cost_function, new ceres::CauchyLoss(0.5), &m, &c);
}
- Solver::Options options;
+ ceres::Solver::Options options;
+ options.max_num_iterations = 25;
options.linear_solver_type = ceres::DENSE_QR;
options.minimizer_progress_to_stdout = true;
- Solver::Summary summary;
- Solve(options, &problem, &summary);
+ ceres::Solver::Summary summary;
+ ceres::Solve(options, &problem, &summary);
std::cout << summary.BriefReport() << "\n";
- std::cout << "Initial m: " << 0.0 << " c: " << 0.0 << "\n";
+ std::cout << "Initial m: " << initial_m << " c: " << initial_c << "\n";
std::cout << "Final m: " << m << " c: " << c << "\n";
return 0;
}
diff --git a/examples/sampled_function/sampled_function.cc b/examples/sampled_function/sampled_function.cc
index bfd7027..187dc54 100644
--- a/examples/sampled_function/sampled_function.cc
+++ b/examples/sampled_function/sampled_function.cc
@@ -35,19 +35,12 @@
#include "ceres/cubic_interpolation.h"
#include "glog/logging.h"
-using ceres::AutoDiffCostFunction;
-using ceres::CostFunction;
-using ceres::CubicInterpolator;
-using ceres::Grid1D;
-using ceres::Problem;
-using ceres::Solve;
-using ceres::Solver;
+using Interpolator = ceres::CubicInterpolator<ceres::Grid1D<double>>;
// A simple cost functor that interfaces an interpolated table of
// values with automatic differentiation.
struct InterpolatedCostFunctor {
- explicit InterpolatedCostFunctor(
- const CubicInterpolator<Grid1D<double>>& interpolator)
+ explicit InterpolatedCostFunctor(const Interpolator& interpolator)
: interpolator_(interpolator) {}
template <typename T>
@@ -56,14 +49,13 @@
return true;
}
- static CostFunction* Create(
- const CubicInterpolator<Grid1D<double>>& interpolator) {
- return new AutoDiffCostFunction<InterpolatedCostFunctor, 1, 1>(
+ static ceres::CostFunction* Create(const Interpolator& interpolator) {
+ return new ceres::AutoDiffCostFunction<InterpolatedCostFunctor, 1, 1>(
new InterpolatedCostFunctor(interpolator));
}
private:
- const CubicInterpolator<Grid1D<double>>& interpolator_;
+ const Interpolator& interpolator_;
};
int main(int argc, char** argv) {
@@ -76,18 +68,19 @@
values[i] = (i - 4.5) * (i - 4.5);
}
- Grid1D<double> array(values, 0, kNumSamples);
- CubicInterpolator<Grid1D<double>> interpolator(array);
+ ceres::Grid1D<double> array(values, 0, kNumSamples);
+ Interpolator interpolator(array);
double x = 1.0;
- Problem problem;
- CostFunction* cost_function = InterpolatedCostFunctor::Create(interpolator);
+ ceres::Problem problem;
+ ceres::CostFunction* cost_function =
+ InterpolatedCostFunctor::Create(interpolator);
problem.AddResidualBlock(cost_function, nullptr, &x);
- Solver::Options options;
+ ceres::Solver::Options options;
options.minimizer_progress_to_stdout = true;
- Solver::Summary summary;
- Solve(options, &problem, &summary);
+ ceres::Solver::Summary summary;
+ ceres::Solve(options, &problem, &summary);
std::cout << summary.BriefReport() << "\n";
std::cout << "Expected x: 4.5. Actual x : " << x << std::endl;
return 0;