// Ceres Solver - A fast non-linear least squares minimizer
// Copyright 2010, 2011, 2012 Google Inc. All rights reserved.
// http://code.google.com/p/ceres-solver/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
//   this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
//   this list of conditions and the following disclaimer in the documentation
//   and/or other materials provided with the distribution.
// * Neither the name of Google Inc. nor the names of its contributors may be
//   used to endorse or promote products derived from this software without
//   specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//
// Author: sameeragarwal@google.com (Sameer Agarwal)
//         keir@google.com (Keir Mierle)
31
#ifndef CERES_INTERNAL_EVALUATOR_H_
#define CERES_INTERNAL_EVALUATOR_H_

#include <string>
#include <vector>
#include "ceres/internal/port.h"
#include "ceres/types.h"

namespace ceres {

class CRSMatrix;

namespace internal {

class Program;
class SparseMatrix;

49// The Evaluator interface offers a way to interact with a least squares cost
50// function that is useful for an optimizer that wants to minimize the least
51// squares objective. This insulates the optimizer from issues like Jacobian
52// storage, parameterization, etc.
53class Evaluator {
54 public:
55 virtual ~Evaluator();
56
57 struct Options {
58 Options()
59 : num_threads(1),
60 num_eliminate_blocks(-1),
61 linear_solver_type(DENSE_QR) {}
62
63 int num_threads;
64 int num_eliminate_blocks;
65 LinearSolverType linear_solver_type;
66 };
67
68 static Evaluator* Create(const Options& options,
69 Program* program,
70 string* error);
71
Sameer Agarwal4997cbc2012-07-02 12:44:34 -070072
73 // This is used for computing the cost, residual and Jacobian for
74 // returning to the user. For actually solving the optimization
75 // problem, the optimization algorithm uses the ProgramEvaluator
76 // objects directly.
77 //
78 // The residual, gradients and jacobian pointers can be NULL, in
79 // which case they will not be evaluated. cost cannot be NULL.
80 //
81 // The parallelism of the evaluator is controlled by num_threads; it
82 // should be at least 1.
83 //
84 // Note: That this function does not take a parameter vector as
85 // input. The parameter blocks are evaluated on the values contained
86 // in the arrays pointed to by their user_state pointers.
87 //
88 // Also worth noting is that this function mutates program by
89 // calling Program::SetParameterOffsetsAndIndex() on it so that an
90 // evaluator object can be constructed.
91 static bool Evaluate(Program* program,
92 int num_threads,
93 double* cost,
94 vector<double>* residuals,
95 vector<double>* gradient,
96 CRSMatrix* jacobian);
97
Keir Mierle8ebb0732012-04-30 23:09:08 -070098 // Build and return a sparse matrix for storing and working with the Jacobian
99 // of the objective function. The jacobian has dimensions
100 // NumEffectiveParameters() by NumParameters(), and is typically extremely
101 // sparse. Since the sparsity pattern of the Jacobian remains constant over
102 // the lifetime of the optimization problem, this method is used to
103 // instantiate a SparseMatrix object with the appropriate sparsity structure
104 // (which can be an expensive operation) and then reused by the optimization
105 // algorithm and the various linear solvers.
106 //
107 // It is expected that the classes implementing this interface will be aware
108 // of their client's requirements for the kind of sparse matrix storage and
109 // layout that is needed for an efficient implementation. For example
110 // CompressedRowOptimizationProblem creates a compressed row representation of
111 // the jacobian for use with CHOLMOD, where as BlockOptimizationProblem
112 // creates a BlockSparseMatrix representation of the jacobian for use in the
113 // Schur complement based methods.
114 virtual SparseMatrix* CreateJacobian() const = 0;
115
116 // Evaluate the cost function for the given state. Returns the cost,
117 // residuals, and jacobian in the corresponding arguments. Both residuals and
118 // jacobian are optional; to avoid computing them, pass NULL.
119 //
120 // If non-NULL, the Jacobian must have a suitable sparsity pattern; only the
121 // values array of the jacobian is modified.
122 //
123 // state is an array of size NumParameters(), cost is a pointer to a single
124 // double, and residuals is an array of doubles of size NumResiduals().
125 virtual bool Evaluate(const double* state,
126 double* cost,
127 double* residuals,
Keir Mierlef44907f2012-07-06 13:52:32 -0700128 double* gradient,
Keir Mierle8ebb0732012-04-30 23:09:08 -0700129 SparseMatrix* jacobian) = 0;
130
131 // Make a change delta (of size NumEffectiveParameters()) to state (of size
132 // NumParameters()) and store the result in state_plus_delta.
133 //
134 // In the case that there are no parameterizations used, this is equivalent to
135 //
136 // state_plus_delta[i] = state[i] + delta[i] ;
137 //
138 // however, the mapping is more complicated in the case of parameterizations
139 // like quaternions. This is the same as the "Plus()" operation in
140 // local_parameterization.h, but operating over the entire state vector for a
141 // problem.
142 virtual bool Plus(const double* state,
143 const double* delta,
144 double* state_plus_delta) const = 0;
145
146 // The number of parameters in the optimization problem.
147 virtual int NumParameters() const = 0;
148
149 // This is the effective number of parameters that the optimizer may adjust.
150 // This applies when there are parameterizations on some of the parameters.
151 virtual int NumEffectiveParameters() const = 0;
152
153 // The number of residuals in the optimization problem.
154 virtual int NumResiduals() const = 0;
155};

}  // namespace internal
}  // namespace ceres

#endif  // CERES_INTERNAL_EVALUATOR_H_