blob: 9f7000050a894e2d04206c8113c02a37d0c6d003 [file] [log] [blame]
Sameer Agarwal40df20b2013-10-03 10:40:55 -07001// Ceres Solver - A fast non-linear least squares minimizer
2// Copyright 2012 Google Inc. All rights reserved.
3// http://code.google.com/p/ceres-solver/
4//
5// Redistribution and use in source and binary forms, with or without
6// modification, are permitted provided that the following conditions are met:
7//
8// * Redistributions of source code must retain the above copyright notice,
9// this list of conditions and the following disclaimer.
10// * Redistributions in binary form must reproduce the above copyright notice,
11// this list of conditions and the following disclaimer in the documentation
12// and/or other materials provided with the distribution.
13// * Neither the name of Google Inc. nor the names of its contributors may be
14// used to endorse or promote products derived from this software without
15// specific prior written permission.
16//
17// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
18// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
21// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
22// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
23// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
24// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
25// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
26// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
27// POSSIBILITY OF SUCH DAMAGE.
28//
29// Author: mierle@gmail.com (Keir Mierle)
30// sameeragarwal@google.com (Sameer Agarwal)
31// thadh@gmail.com (Thad Hughes)
32//
33// This numeric diff implementation differs from the one found in
34// numeric_diff_cost_function.h by supporting numericdiff on cost
35// functions with variable numbers of parameters with variable
36// sizes. With the other implementation, all the sizes (both the
37// number of parameter blocks and the size of each block) must be
38// fixed at compile time.
39//
40// The functor API differs slightly from the API for fixed size
41// numeric diff; the expected interface for the cost functors is:
42//
//   struct MyCostFunctor {
//     bool operator()(double const* const* parameters, double* residuals) const {
//       // Use parameters[i] to access the i'th parameter block.
//     }
//   };
49//
50// Since the sizing of the parameters is done at runtime, you must
51// also specify the sizes after creating the
52// DynamicNumericDiffCostFunction. For example:
53//
//   DynamicNumericDiffCostFunction<MyCostFunctor, CENTRAL> cost_function(
//       new MyCostFunctor());
56// cost_function.AddParameterBlock(5);
57// cost_function.AddParameterBlock(10);
58// cost_function.SetNumResiduals(21);
59
60#ifndef CERES_PUBLIC_DYNAMIC_NUMERIC_DIFF_COST_FUNCTION_H_
61#define CERES_PUBLIC_DYNAMIC_NUMERIC_DIFF_COST_FUNCTION_H_
62
#include <cmath>
#include <cstring>
#include <numeric>
#include <vector>

#include "ceres/cost_function.h"
#include "ceres/internal/eigen.h"
#include "ceres/internal/scoped_ptr.h"
#include "glog/logging.h"
71
72namespace ceres {
73
74template <typename CostFunctor, NumericDiffMethod method = CENTRAL>
75class DynamicNumericDiffCostFunction : public CostFunction {
76 public:
77 explicit DynamicNumericDiffCostFunction(CostFunctor* functor,
78 Ownership ownership = TAKE_OWNERSHIP,
79 double relative_step_size = 1e-6)
80 : functor_(functor),
81 ownership_(ownership),
82 relative_step_size_(relative_step_size) {
83 }
84
85 virtual ~DynamicNumericDiffCostFunction() {
86 if (ownership_ != TAKE_OWNERSHIP) {
87 functor_.release();
88 }
89 }
90
91 void AddParameterBlock(int size) {
92 mutable_parameter_block_sizes()->push_back(size);
93 }
94
95 void SetNumResiduals(int num_residuals) {
96 set_num_residuals(num_residuals);
97 }
98
99 virtual bool Evaluate(double const* const* parameters,
100 double* residuals,
101 double** jacobians) const {
102 CHECK_GT(num_residuals(), 0)
103 << "You must call DynamicNumericDiffCostFunction::SetNumResiduals() "
104 << "before DynamicNumericDiffCostFunction::Evaluate().";
105
106 const vector<int16>& block_sizes = parameter_block_sizes();
107 CHECK(!block_sizes.empty())
108 << "You must call DynamicNumericDiffCostFunction::AddParameterBlock() "
109 << "before DynamicNumericDiffCostFunction::Evaluate().";
110
Sameer Agarwal10ac7d82013-10-03 14:37:07 -0700111 const bool status = (*functor_)(parameters, residuals);
112 if (jacobians == NULL || !status) {
Sameer Agarwal40df20b2013-10-03 10:40:55 -0700113 return status;
114 }
115
116 // Create local space for a copy of the parameters which will get mutated.
117 int parameters_size = accumulate(block_sizes.begin(), block_sizes.end(), 0);
118 vector<double> parameters_copy(parameters_size);
119 vector<double*> parameters_references_copy(block_sizes.size());
120 parameters_references_copy[0] = &parameters_copy[0];
121 for (int block = 1; block < block_sizes.size(); ++block) {
122 parameters_references_copy[block] = parameters_references_copy[block - 1]
123 + block_sizes[block - 1];
124 }
125
126 // Copy the parameters into the local temp space.
127 for (int block = 0; block < block_sizes.size(); ++block) {
128 memcpy(parameters_references_copy[block],
129 parameters[block],
130 block_sizes[block] * sizeof(*parameters[block]));
131 }
132
133 for (int block = 0; block < block_sizes.size(); ++block) {
134 if (jacobians[block] != NULL &&
135 !EvaluateJacobianForParameterBlock(block_sizes[block],
136 block,
137 relative_step_size_,
138 residuals,
139 &parameters_references_copy[0],
140 jacobians)) {
141 return false;
142 }
143 }
144 return true;
145 }
146
147 private:
148 bool EvaluateJacobianForParameterBlock(const int parameter_block_size,
149 const int parameter_block,
150 const double relative_step_size,
151 double const* residuals_at_eval_point,
152 double** parameters,
153 double** jacobians) const {
154 using Eigen::Map;
155 using Eigen::Matrix;
156 using Eigen::Dynamic;
157 using Eigen::RowMajor;
158
159 typedef Matrix<double, Dynamic, 1> ResidualVector;
160 typedef Matrix<double, Dynamic, 1> ParameterVector;
161 typedef Matrix<double, Dynamic, Dynamic, RowMajor> JacobianMatrix;
162
163 int num_residuals = this->num_residuals();
164
165 Map<JacobianMatrix> parameter_jacobian(jacobians[parameter_block],
166 num_residuals,
167 parameter_block_size);
168
169 // Mutate one element at a time and then restore.
170 Map<ParameterVector> x_plus_delta(parameters[parameter_block],
171 parameter_block_size);
172 ParameterVector x(x_plus_delta);
173 ParameterVector step_size = x.array().abs() * relative_step_size;
174
175 // To handle cases where a paremeter is exactly zero, instead use
176 // the mean step_size for the other dimensions.
177 double fallback_step_size = step_size.sum() / step_size.rows();
178 if (fallback_step_size == 0.0) {
179 // If all the parameters are zero, there's no good answer. Use the given
180 // relative step_size as absolute step_size and hope for the best.
181 fallback_step_size = relative_step_size;
182 }
183
184 // For each parameter in the parameter block, use finite
185 // differences to compute the derivative for that parameter.
186 for (int j = 0; j < parameter_block_size; ++j) {
187 if (step_size(j) == 0.0) {
188 // The parameter is exactly zero, so compromise and use the
189 // mean step_size from the other parameters. This can break in
190 // many cases, but it's hard to pick a good number without
191 // problem specific knowledge.
192 step_size(j) = fallback_step_size;
193 }
194 x_plus_delta(j) = x(j) + step_size(j);
195
196 ResidualVector residuals(num_residuals);
197 if (!(*functor_)(parameters, &residuals[0])) {
198 // Something went wrong; bail.
199 return false;
200 }
201
202 // Compute this column of the jacobian in 3 steps:
203 // 1. Store residuals for the forward part.
204 // 2. Subtract residuals for the backward (or 0) part.
205 // 3. Divide out the run.
206 parameter_jacobian.col(j) = residuals;
207
208 double one_over_h = 1 / step_size(j);
209 if (method == CENTRAL) {
210 // Compute the function on the other side of x(j).
211 x_plus_delta(j) = x(j) - step_size(j);
212
213 if (!(*functor_)(parameters, &residuals[0])) {
214 // Something went wrong; bail.
215 return false;
216 }
217
218 parameter_jacobian.col(j) -= residuals;
219 one_over_h /= 2;
220 } else {
221 // Forward difference only; reuse existing residuals evaluation.
222 parameter_jacobian.col(j) -=
223 Map<const ResidualVector>(residuals_at_eval_point, num_residuals);
224 }
225 x_plus_delta(j) = x(j); // Restore x_plus_delta.
226
227 // Divide out the run to get slope.
228 parameter_jacobian.col(j) *= one_over_h;
229 }
230 return true;
231 }
232
233 internal::scoped_ptr<CostFunctor> functor_;
234 Ownership ownership_;
235 const double relative_step_size_;
236};
237
238} // namespace ceres
239
#endif  // CERES_PUBLIC_DYNAMIC_NUMERIC_DIFF_COST_FUNCTION_H_