// Ceres Solver - A fast non-linear least squares minimizer
// Copyright 2015 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
//   this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
//   this list of conditions and the following disclaimer in the documentation
//   and/or other materials provided with the distribution.
// * Neither the name of Google Inc. nor the names of its contributors may be
//   used to endorse or promote products derived from this software without
//   specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//
// Author: sameeragarwal@google.com (Sameer Agarwal)
//         keir@google.com (Keir Mierle)

#ifndef CERES_INTERNAL_EVALUATOR_H_
#define CERES_INTERNAL_EVALUATOR_H_

#include <map>
#include <string>
#include <vector>

#include "ceres/execution_summary.h"
#include "ceres/internal/port.h"
#include "ceres/types.h"

namespace ceres {

struct CRSMatrix;

namespace internal {

class Program;
class SparseMatrix;

// The Evaluator interface offers a way to interact with a least squares cost
// function that is useful for an optimizer that wants to minimize the least
// squares objective. This insulates the optimizer from issues like Jacobian
// storage, parameterization, etc.
class Evaluator {
 public:
  virtual ~Evaluator();

  struct Options {
    Options()
        : num_threads(1),
          num_eliminate_blocks(-1),
          linear_solver_type(DENSE_QR),
          dynamic_sparsity(false) {}

    int num_threads;
    int num_eliminate_blocks;
    LinearSolverType linear_solver_type;
    bool dynamic_sparsity;
  };

  static Evaluator* Create(const Options& options,
                           Program* program,
                           std::string* error);

  // This is used for computing the cost, residuals and Jacobian for
  // returning to the user. For actually solving the optimization
  // problem, the optimization algorithm uses the ProgramEvaluator
  // objects directly.
  //
  // The residuals, gradient and jacobian pointers can be NULL, in
  // which case they will not be evaluated. cost cannot be NULL.
  //
  // The parallelism of the evaluator is controlled by num_threads; it
  // should be at least 1.
  //
  // Note that this function does not take a parameter vector as
  // input. The parameter blocks are evaluated on the values contained
  // in the arrays pointed to by their user_state pointers.
  //
  // Also worth noting is that this function mutates program by
  // calling Program::SetParameterOffsetsAndIndex() on it so that an
  // evaluator object can be constructed.
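  //
  // A minimal usage sketch (illustrative only, not a normative part of this
  // interface; it assumes a fully constructed Program* named program):
  //
  //   double cost = 0.0;
  //   std::vector<double> residuals;
  //   std::vector<double> gradient;
  //   CRSMatrix jacobian;
  //   bool status = Evaluator::Evaluate(program,
  //                                     /* num_threads = */ 1,
  //                                     &cost,
  //                                     &residuals,
  //                                     &gradient,
  //                                     &jacobian);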
  static bool Evaluate(Program* program,
                       int num_threads,
                       double* cost,
                       std::vector<double>* residuals,
                       std::vector<double>* gradient,
                       CRSMatrix* jacobian);

  // Build and return a sparse matrix for storing and working with the Jacobian
  // of the objective function. The jacobian has dimensions NumResiduals() by
  // NumEffectiveParameters(), and is typically extremely sparse. Since the
  // sparsity pattern of the Jacobian remains constant over the lifetime of the
  // optimization problem, this method is used to instantiate a SparseMatrix
  // object with the appropriate sparsity structure (which can be an expensive
  // operation) that is then reused by the optimization algorithm and the
  // various linear solvers.
  //
  // It is expected that the classes implementing this interface will be aware
  // of their client's requirements for the kind of sparse matrix storage and
  // layout that is needed for an efficient implementation. For example
  // CompressedRowOptimizationProblem creates a compressed row representation
  // of the jacobian for use with CHOLMOD, whereas BlockOptimizationProblem
  // creates a BlockSparseMatrix representation of the jacobian for use in the
  // Schur complement based methods.
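  //
  // A sketch of the intended create-once, reuse-many-times pattern (assumes
  // evaluator is an Evaluator* and state points to NumParameters() doubles;
  // ownership handling is omitted for brevity):
  //
  //   SparseMatrix* jacobian = evaluator->CreateJacobian();
  //   double cost;
  //   // Reuse the same jacobian object across repeated evaluations; only its
  //   // values array changes, never its sparsity structure.
  //   evaluator->Evaluate(state, &cost, NULL, NULL, jacobian);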
  virtual SparseMatrix* CreateJacobian() const = 0;

  // Options struct to control Evaluator::Evaluate.
  struct EvaluateOptions {
    EvaluateOptions()
        : apply_loss_function(true) {
    }

    // If false, the loss function correction is not applied to the
    // residual blocks.
    bool apply_loss_function;
  };

  // Evaluate the cost function for the given state. Returns the cost,
  // residuals, and jacobian in the corresponding arguments. Both residuals and
  // jacobian are optional; to avoid computing them, pass NULL.
  //
  // If non-NULL, the Jacobian must have a suitable sparsity pattern; only the
  // values array of the jacobian is modified.
  //
  // state is an array of size NumParameters(), cost is a pointer to a single
  // double, and residuals is an array of doubles of size NumResiduals().
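  //
  // For example, to evaluate raw (un-robustified) residuals one might write
  // (a sketch; evaluator, state and jacobian are assumed to already exist):
  //
  //   EvaluateOptions options;
  //   options.apply_loss_function = false;
  //   double cost;
  //   evaluator->Evaluate(options, state, &cost, NULL, NULL, jacobian);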
  virtual bool Evaluate(const EvaluateOptions& evaluate_options,
                        const double* state,
                        double* cost,
                        double* residuals,
                        double* gradient,
                        SparseMatrix* jacobian) = 0;

  // Variant of Evaluator::Evaluate where the user wishes to use the
  // default EvaluateOptions struct. This is mostly here as a
  // convenience method.
  bool Evaluate(const double* state,
                double* cost,
                double* residuals,
                double* gradient,
                SparseMatrix* jacobian) {
    return Evaluate(EvaluateOptions(),
                    state,
                    cost,
                    residuals,
                    gradient,
                    jacobian);
  }

  // Make a change delta (of size NumEffectiveParameters()) to state (of size
  // NumParameters()) and store the result in state_plus_delta.
  //
  // In the case that there are no parameterizations used, this is equivalent to
  //
  //   state_plus_delta[i] = state[i] + delta[i];
  //
  // however, the mapping is more complicated in the case of parameterizations
  // like quaternions. This is the same as the "Plus()" operation in
  // local_parameterization.h, but operating over the entire state vector for a
  // problem.
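  //
  // As an illustrative sketch, for a problem whose only parameter block is a
  // quaternion with a quaternion parameterization, state and state_plus_delta
  // have 4 entries while delta has 3:
  //
  //   double state[4] = {1.0, 0.0, 0.0, 0.0};
  //   double delta[3] = {0.1, 0.0, 0.0};
  //   double state_plus_delta[4];
  //   evaluator->Plus(state, delta, state_plus_delta);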
  virtual bool Plus(const double* state,
                    const double* delta,
                    double* state_plus_delta) const = 0;

  // The number of parameters in the optimization problem.
  virtual int NumParameters() const = 0;

  // This is the effective number of parameters that the optimizer may adjust.
  // This applies when there are parameterizations on some of the parameters.
  virtual int NumEffectiveParameters() const = 0;

  // The number of residuals in the optimization problem.
  virtual int NumResiduals() const = 0;

  // The following two methods return copies instead of references so
  // that the base class implementation does not have to worry about
  // lifetime issues. Further, these calls are not expected to be
  // frequent or performance sensitive.
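  //
  // For example (a sketch; assumes evaluator is an Evaluator*):
  //
  //   std::map<std::string, int> calls = evaluator->CallStatistics();
  //   std::map<std::string, double> times = evaluator->TimeStatistics();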
  virtual std::map<std::string, int> CallStatistics() const {
    return std::map<std::string, int>();
  }

  virtual std::map<std::string, double> TimeStatistics() const {
    return std::map<std::string, double>();
  }
};

}  // namespace internal
}  // namespace ceres

#endif  // CERES_INTERNAL_EVALUATOR_H_