// Ceres Solver - A fast non-linear least squares minimizer
// Copyright 2010, 2011, 2012 Google Inc. All rights reserved.
// http://code.google.com/p/ceres-solver/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
//   this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
//   this list of conditions and the following disclaimer in the documentation
//   and/or other materials provided with the distribution.
// * Neither the name of Google Inc. nor the names of its contributors may be
//   used to endorse or promote products derived from this software without
//   specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//
// Author: keir@google.com (Keir Mierle)
//
// The ProgramEvaluator runs the cost functions contained in each residual block
// and stores the results into a jacobian. The particular type of jacobian is
// abstracted out using two template parameters:
//
//   - An "EvaluatePreparer" that is responsible for creating the array of
//     pointers to the jacobian blocks into which the cost function writes its
//     output.
//   - A "JacobianWriter" that is responsible for storing the resulting
//     jacobian blocks in the passed sparse matrix.
//
// This abstraction affords an efficient evaluator implementation while still
// supporting writing to multiple sparse matrix formats. For example, when the
// ProgramEvaluator is parameterized for writing to block sparse matrices, the
// residual jacobians are written directly into their final position in the
// block sparse matrix by the user's CostFunction; there is no copying.
//
// The evaluation is threaded with OpenMP.
//
// The EvaluatePreparer and JacobianWriter interfaces are as follows:
//
//   class EvaluatePreparer {
//     // Prepare the jacobians array for use as the destination of a call to
//     // a cost function's evaluate method.
//     void Prepare(const ResidualBlock* residual_block,
//                  int residual_block_index,
//                  SparseMatrix* jacobian,
//                  double** jacobians);
//   }
//
//   class JacobianWriter {
//     // Create a jacobian that this writer can write. Same as
//     // Evaluator::CreateJacobian.
//     SparseMatrix* CreateJacobian() const;
//
//     // Create num_threads evaluate preparers. Caller owns result which must
//     // be freed with delete[]. Resulting preparers are valid while *this is.
//     EvaluatePreparer* CreateEvaluatePreparers(int num_threads);
//
//     // Write the block jacobians from a residual block evaluation to the
//     // larger sparse jacobian.
//     void Write(int residual_id,
//                int residual_offset,
//                double** jacobians,
//                SparseMatrix* jacobian);
//   }
//
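// For illustration only (an assumed usage pattern, not something defined in
// this header), a concrete evaluator is obtained by instantiating the template
// with a matching preparer/writer pair, roughly along these lines:
//
//   typedef ProgramEvaluator<BlockEvaluatePreparer,
//                            BlockJacobianWriter> BlockSparseEvaluator;
//
//   BlockSparseEvaluator evaluator(options, program);
//   scoped_ptr<SparseMatrix> jacobian(evaluator.CreateJacobian());
//   evaluator.Evaluate(evaluate_options, state, &cost,
//                      residuals, gradient, jacobian.get());
//
// Here BlockEvaluatePreparer/BlockJacobianWriter names a hypothetical pair
// targeting block sparse jacobians; a writer for another matrix format can be
// substituted without changing this class.
//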
// Note: The ProgramEvaluator is not thread safe, since internally it maintains
// some per-thread scratch space.

#ifndef CERES_INTERNAL_PROGRAM_EVALUATOR_H_
#define CERES_INTERNAL_PROGRAM_EVALUATOR_H_

#ifdef CERES_USE_OPENMP
#include <omp.h>
#endif

#include <map>
#include <vector>
#include "ceres/execution_summary.h"
#include "ceres/internal/eigen.h"
#include "ceres/internal/scoped_ptr.h"
#include "ceres/parameter_block.h"
#include "ceres/program.h"
#include "ceres/residual_block.h"

namespace ceres {
namespace internal {

template<typename EvaluatePreparer, typename JacobianWriter>
class ProgramEvaluator : public Evaluator {
 public:
  ProgramEvaluator(const Evaluator::Options& options, Program* program)
      : options_(options),
        program_(program),
        jacobian_writer_(options, program),
        evaluate_preparers_(
            jacobian_writer_.CreateEvaluatePreparers(options.num_threads)) {
#ifndef CERES_USE_OPENMP
    CHECK_EQ(1, options_.num_threads)
        << "OpenMP support is not compiled into this binary; "
        << "only options.num_threads=1 is supported.";
#endif

    BuildResidualLayout(*program, &residual_layout_);
    evaluate_scratch_.reset(CreateEvaluatorScratch(*program,
                                                   options.num_threads));
  }

  // Implementation of Evaluator interface.
  SparseMatrix* CreateJacobian() const {
    return jacobian_writer_.CreateJacobian();
  }

  bool Evaluate(const Evaluator::EvaluateOptions& evaluate_options,
                const double* state,
                double* cost,
                double* residuals,
                double* gradient,
                SparseMatrix* jacobian) {
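    // cost must point to valid storage and is always written on success;
    // residuals, gradient, and jacobian are optional and are only filled in
    // when the corresponding pointer is non-NULL.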
    ScopedExecutionTimer total_timer("Evaluator::Total", &execution_summary_);
    ScopedExecutionTimer call_type_timer(gradient == NULL && jacobian == NULL
                                         ? "Evaluator::Residual"
                                         : "Evaluator::Jacobian",
                                         &execution_summary_);

    // The parameters are stateful, so set the state before evaluating.
    if (!program_->StateVectorToParameterBlocks(state)) {
      return false;
    }

    if (residuals != NULL) {
      VectorRef(residuals, program_->NumResiduals()).setZero();
    }

    if (jacobian != NULL) {
      jacobian->SetZero();
    }

    // Each thread gets its own cost and evaluate scratch space.
    for (int i = 0; i < options_.num_threads; ++i) {
      evaluate_scratch_[i].cost = 0.0;
      if (gradient != NULL) {
        VectorRef(evaluate_scratch_[i].gradient.get(),
                  program_->NumEffectiveParameters()).setZero();
      }
    }

    // This bool is used to disable the loop if an error is encountered
    // without breaking out of it. The remaining loop iterations are still run,
    // but with an empty body, and so will finish quickly.
    bool abort = false;
    int num_residual_blocks = program_->NumResidualBlocks();
#pragma omp parallel for num_threads(options_.num_threads)
    for (int i = 0; i < num_residual_blocks; ++i) {
// Disable the loop instead of breaking, as required by OpenMP.
#pragma omp flush(abort)
      if (abort) {
        continue;
      }

#ifdef CERES_USE_OPENMP
      int thread_id = omp_get_thread_num();
#else
      int thread_id = 0;
#endif
      EvaluatePreparer* preparer = &evaluate_preparers_[thread_id];
      EvaluateScratch* scratch = &evaluate_scratch_[thread_id];

      // Prepare block residuals if requested.
      const ResidualBlock* residual_block = program_->residual_blocks()[i];
      double* block_residuals = NULL;
      if (residuals != NULL) {
        block_residuals = residuals + residual_layout_[i];
      } else if (gradient != NULL) {
        block_residuals = scratch->residual_block_residuals.get();
      }

      // Prepare block jacobians if requested.
      double** block_jacobians = NULL;
      if (jacobian != NULL || gradient != NULL) {
        preparer->Prepare(residual_block,
                          i,
                          jacobian,
                          scratch->jacobian_block_ptrs.get());
        block_jacobians = scratch->jacobian_block_ptrs.get();
      }

      // Evaluate the cost, residuals, and jacobians.
      double block_cost;
      if (!residual_block->Evaluate(
              evaluate_options.apply_loss_function,
              &block_cost,
              block_residuals,
              block_jacobians,
              scratch->residual_block_evaluate_scratch.get())) {
        abort = true;
// This ensures that the OpenMP threads have a consistent view of 'abort'. Do
// the flush inside the failure case so that there is usually only one
// synchronization point per loop iteration instead of two.
#pragma omp flush(abort)
        continue;
      }

      scratch->cost += block_cost;

      // Store the jacobians, if they were requested.
      if (jacobian != NULL) {
        jacobian_writer_.Write(i,
                               residual_layout_[i],
                               block_jacobians,
                               jacobian);
      }

      // Compute and store the gradient, if it was requested.
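      // Each active (non-constant) parameter block contributes J_j^T * r to
      // the per-thread scratch gradient, accumulated in the block's local
      // (tangent) coordinates starting at delta_offset().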
      if (gradient != NULL) {
        int num_residuals = residual_block->NumResiduals();
        int num_parameter_blocks = residual_block->NumParameterBlocks();
        for (int j = 0; j < num_parameter_blocks; ++j) {
          const ParameterBlock* parameter_block =
              residual_block->parameter_blocks()[j];
          if (parameter_block->IsConstant()) {
            continue;
          }
          MatrixRef block_jacobian(block_jacobians[j],
                                   num_residuals,
                                   parameter_block->LocalSize());
          VectorRef block_gradient(scratch->gradient.get() +
                                   parameter_block->delta_offset(),
                                   parameter_block->LocalSize());
          VectorRef block_residual(block_residuals, num_residuals);
          block_gradient += block_residual.transpose() * block_jacobian;
        }
      }
    }

    if (!abort) {
      // Sum the cost and gradient (if requested) from each thread.
      (*cost) = 0.0;
      int num_parameters = program_->NumEffectiveParameters();
      if (gradient != NULL) {
        VectorRef(gradient, num_parameters).setZero();
      }
      for (int i = 0; i < options_.num_threads; ++i) {
        (*cost) += evaluate_scratch_[i].cost;
        if (gradient != NULL) {
          VectorRef(gradient, num_parameters) +=
              VectorRef(evaluate_scratch_[i].gradient.get(), num_parameters);
        }
      }
    }
    return !abort;
  }

  bool Plus(const double* state,
            const double* delta,
            double* state_plus_delta) const {
    return program_->Plus(state, delta, state_plus_delta);
  }

  int NumParameters() const {
    return program_->NumParameters();
  }
  int NumEffectiveParameters() const {
    return program_->NumEffectiveParameters();
  }

  int NumResiduals() const {
    return program_->NumResiduals();
  }

  virtual map<string, int> CallStatistics() const {
    return execution_summary_.calls();
  }

  virtual map<string, double> TimeStatistics() const {
    return execution_summary_.times();
  }

 private:
  // Per-thread scratch space needed to evaluate and store each residual block.
  struct EvaluateScratch {
    void Init(int max_parameters_per_residual_block,
              int max_scratch_doubles_needed_for_evaluate,
              int max_residuals_per_residual_block,
              int num_parameters) {
      residual_block_evaluate_scratch.reset(
          new double[max_scratch_doubles_needed_for_evaluate]);
      gradient.reset(new double[num_parameters]);
      VectorRef(gradient.get(), num_parameters).setZero();
      residual_block_residuals.reset(
          new double[max_residuals_per_residual_block]);
      jacobian_block_ptrs.reset(
          new double*[max_parameters_per_residual_block]);
    }

    double cost;
    scoped_array<double> residual_block_evaluate_scratch;
    // The gradient in the local parameterization.
    scoped_array<double> gradient;
    // Enough space to store the residual for the largest residual block.
    scoped_array<double> residual_block_residuals;
    scoped_array<double*> jacobian_block_ptrs;
  };

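  // residual_layout[i] is the position of the first residual of residual
  // block i in the program-wide residual vector.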
  static void BuildResidualLayout(const Program& program,
                                  vector<int>* residual_layout) {
    const vector<ResidualBlock*>& residual_blocks = program.residual_blocks();
    residual_layout->resize(program.NumResidualBlocks());
    int residual_pos = 0;
    for (int i = 0; i < residual_blocks.size(); ++i) {
      const int num_residuals = residual_blocks[i]->NumResiduals();
      (*residual_layout)[i] = residual_pos;
      residual_pos += num_residuals;
    }
  }

  // Create scratch space for each thread evaluating the program.
  static EvaluateScratch* CreateEvaluatorScratch(const Program& program,
                                                 int num_threads) {
    int max_parameters_per_residual_block =
        program.MaxParametersPerResidualBlock();
    int max_scratch_doubles_needed_for_evaluate =
        program.MaxScratchDoublesNeededForEvaluate();
    int max_residuals_per_residual_block =
        program.MaxResidualsPerResidualBlock();
    int num_parameters = program.NumEffectiveParameters();

    EvaluateScratch* evaluate_scratch = new EvaluateScratch[num_threads];
    for (int i = 0; i < num_threads; ++i) {
      evaluate_scratch[i].Init(max_parameters_per_residual_block,
                               max_scratch_doubles_needed_for_evaluate,
                               max_residuals_per_residual_block,
                               num_parameters);
    }
    return evaluate_scratch;
  }

  Evaluator::Options options_;
  Program* program_;
  JacobianWriter jacobian_writer_;
  scoped_array<EvaluatePreparer> evaluate_preparers_;
  scoped_array<EvaluateScratch> evaluate_scratch_;
  vector<int> residual_layout_;
  ::ceres::internal::ExecutionSummary execution_summary_;
};

}  // namespace internal
}  // namespace ceres

#endif  // CERES_INTERNAL_PROGRAM_EVALUATOR_H_