blob: 16c7387bd4c7adaae1554b6bde453c68dffd80d1 [file] [log] [blame]
Keir Mierle8ebb0732012-04-30 23:09:08 -07001// Ceres Solver - A fast non-linear least squares minimizer
2// Copyright 2010, 2011, 2012 Google Inc. All rights reserved.
3// http://code.google.com/p/ceres-solver/
4//
5// Redistribution and use in source and binary forms, with or without
6// modification, are permitted provided that the following conditions are met:
7//
8// * Redistributions of source code must retain the above copyright notice,
9// this list of conditions and the following disclaimer.
10// * Redistributions in binary form must reproduce the above copyright notice,
11// this list of conditions and the following disclaimer in the documentation
12// and/or other materials provided with the distribution.
13// * Neither the name of Google Inc. nor the names of its contributors may be
14// used to endorse or promote products derived from this software without
15// specific prior written permission.
16//
17// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
18// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
21// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
22// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
23// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
24// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
25// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
26// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
27// POSSIBILITY OF SUCH DAMAGE.
28//
29// Author: sameeragarwal@google.com (Sameer Agarwal)
30
31#ifndef CERES_PUBLIC_SOLVER_H_
32#define CERES_PUBLIC_SOLVER_H_
33
34#include <cmath>
35#include <string>
36#include <vector>
Sameer Agarwal4997cbc2012-07-02 12:44:34 -070037#include "ceres/crs_matrix.h"
Keir Mierle8ebb0732012-04-30 23:09:08 -070038#include "ceres/internal/macros.h"
39#include "ceres/internal/port.h"
Sameer Agarwal4997cbc2012-07-02 12:44:34 -070040#include "ceres/iteration_callback.h"
Keir Mierle8ebb0732012-04-30 23:09:08 -070041#include "ceres/types.h"
42
43namespace ceres {
44
45class Problem;
46
// Interface for non-linear least squares solvers.
class Solver {
 public:
  virtual ~Solver();

  // The options structure contains, not surprisingly, options that control how
  // the solver operates. The defaults should be suitable for a wide range of
  // problems; however, better performance is often obtainable with tweaking.
  //
  // The constants are defined inside types.h
  struct Options {
    // Default constructor that sets up a generic sparse problem.
    Options() {
      trust_region_strategy_type = LEVENBERG_MARQUARDT;
      dogleg_type = TRADITIONAL_DOGLEG;
      use_nonmonotonic_steps = false;
      max_consecutive_nonmonotonic_steps = 5;
      max_num_iterations = 50;
      max_solver_time_in_seconds = 1e9;
      num_threads = 1;
      initial_trust_region_radius = 1e4;
      max_trust_region_radius = 1e16;
      min_trust_region_radius = 1e-32;
      min_relative_decrease = 1e-3;
      lm_min_diagonal = 1e-6;
      lm_max_diagonal = 1e32;
      max_num_consecutive_invalid_steps = 5;
      function_tolerance = 1e-6;
      gradient_tolerance = 1e-10;
      parameter_tolerance = 1e-8;

      // Fall back to a dense solver when no sparse linear algebra
      // backend was compiled in.
#if defined(CERES_NO_SUITESPARSE) && defined(CERES_NO_CXSPARSE)
      linear_solver_type = DENSE_QR;
#else
      linear_solver_type = SPARSE_NORMAL_CHOLESKY;
#endif

      preconditioner_type = JACOBI;

      // Prefer SuiteSparse; use CXSparse only when SuiteSparse is
      // unavailable but CXSparse is.
      sparse_linear_algebra_library = SUITE_SPARSE;
#if defined(CERES_NO_SUITESPARSE) && !defined(CERES_NO_CXSPARSE)
      sparse_linear_algebra_library = CX_SPARSE;
#endif

      num_linear_solver_threads = 1;
      num_eliminate_blocks = 0;
      ordering_type = NATURAL;

      // Block AMD ordering is only implemented on top of SuiteSparse.
#if defined(CERES_NO_SUITESPARSE)
      use_block_amd = false;
#else
      use_block_amd = true;
#endif

      linear_solver_min_num_iterations = 1;
      linear_solver_max_num_iterations = 500;
      eta = 1e-1;
      jacobi_scaling = true;
      logging_type = PER_MINIMIZER_ITERATION;
      minimizer_progress_to_stdout = false;
      return_initial_residuals = false;
      return_initial_gradient = false;
      return_initial_jacobian = false;
      return_final_residuals = false;
      return_final_gradient = false;
      return_final_jacobian = false;
      lsqp_dump_directory = "/tmp";
      lsqp_dump_format_type = TEXTFILE;
      check_gradients = false;
      gradient_check_relative_precision = 1e-8;
      numeric_derivative_relative_step_size = 1e-6;
      update_state_every_iteration = false;
    }

    // Minimizer options ----------------------------------------

    // Strategy used to compute the trust region step (see types.h);
    // defaults to LEVENBERG_MARQUARDT.
    TrustRegionStrategyType trust_region_strategy_type;

    // Type of dogleg strategy to use.
    DoglegType dogleg_type;

    // The classical trust region methods are descent methods, in that
    // they only accept a point if it strictly reduces the value of
    // the objective function.
    //
    // Relaxing this requirement allows the algorithm to be more
    // efficient in the long term at the cost of some local increase
    // in the value of the objective function.
    //
    // This is because allowing for non-decreasing objective function
    // values in a principled manner allows the algorithm to "jump over
    // boulders" as the method is not restricted to move into narrow
    // valleys while preserving its convergence properties.
    //
    // Setting use_nonmonotonic_steps to true enables the
    // non-monotonic trust region algorithm as described by Conn,
    // Gould & Toint in "Trust Region Methods", Section 10.1.
    //
    // The parameter max_consecutive_nonmonotonic_steps controls the
    // window size used by the step selection algorithm to accept
    // non-monotonic steps.
    //
    // Even though the value of the objective function may be larger
    // than the minimum value encountered over the course of the
    // optimization, the final parameters returned to the user are the
    // ones corresponding to the minimum cost over all iterations.
    bool use_nonmonotonic_steps;
    int max_consecutive_nonmonotonic_steps;

    // Maximum number of iterations for the minimizer to run for.
    int max_num_iterations;

    // Maximum time for which the minimizer should run for.
    double max_solver_time_in_seconds;

    // Number of threads used by Ceres for evaluating the cost and
    // jacobians.
    int num_threads;

    // Trust region minimizer settings.
    double initial_trust_region_radius;
    double max_trust_region_radius;

    // Minimizer terminates when the trust region radius becomes
    // smaller than this value.
    double min_trust_region_radius;

    // Lower bound for the relative decrease before a step is
    // accepted.
    double min_relative_decrease;

    // For the Levenberg-Marquardt algorithm, the scaled diagonal of
    // the normal equations J'J is used to control the size of the
    // trust region. Extremely small and large values along the
    // diagonal can make this regularization scheme
    // fail. lm_max_diagonal and lm_min_diagonal, clamp the values of
    // diag(J'J) from above and below. In the normal course of
    // operation, the user should not have to modify these parameters.
    double lm_min_diagonal;
    double lm_max_diagonal;

    // Sometimes due to numerical conditioning problems or linear
    // solver flakiness, the trust region strategy may return a
    // numerically invalid step that can be fixed by reducing the
    // trust region size. So the TrustRegionMinimizer allows for a few
    // successive invalid steps before it declares NUMERICAL_FAILURE.
    int max_num_consecutive_invalid_steps;

    // Minimizer terminates when
    //
    //   (new_cost - old_cost) < function_tolerance * old_cost;
    //
    double function_tolerance;

    // Minimizer terminates when
    //
    //   max_i |gradient_i| < gradient_tolerance * max_i|initial_gradient_i|
    //
    // This value should typically be 1e-4 * function_tolerance.
    double gradient_tolerance;

    // Minimizer terminates when
    //
    //   |step|_2 <= parameter_tolerance * ( |x|_2 +  parameter_tolerance)
    //
    double parameter_tolerance;

    // Linear least squares solver options -------------------------------------

    LinearSolverType linear_solver_type;

    // Type of preconditioner to use with the iterative linear solvers.
    PreconditionerType preconditioner_type;

    // Ceres supports using multiple sparse linear algebra libraries
    // for sparse matrix ordering and factorizations. Currently,
    // SUITE_SPARSE and CX_SPARSE are the valid choices, depending on
    // whether they are linked into Ceres at build time.
    SparseLinearAlgebraLibraryType sparse_linear_algebra_library;

    // Number of threads used by Ceres to solve the Newton
    // step. Currently only the SPARSE_SCHUR solver is capable of
    // using this setting.
    int num_linear_solver_threads;

    // For Schur reduction based methods, the first 0 to num blocks are
    // eliminated using the Schur reduction. For example, when solving
    // traditional structure from motion problems where the parameters are in
    // two classes (cameras and points) then num_eliminate_blocks would be the
    // number of points.
    //
    // This parameter is used in conjunction with the ordering.
    // Applies to: Preprocessor and linear least squares solver.
    int num_eliminate_blocks;

    // Internally Ceres reorders the parameter blocks to help the
    // various linear solvers. This parameter allows the user to
    // influence the re-ordering strategy used. For structure from
    // motion problems use SCHUR, for other problems NATURAL (default)
    // is a good choice. In case you wish to specify your own ordering
    // scheme, for example in conjunction with num_eliminate_blocks,
    // use USER.
    OrderingType ordering_type;

    // The ordering of the parameter blocks. The solver pays attention
    // to it if the ordering_type is set to USER and the vector is
    // non-empty.
    vector<double*> ordering;

    // By virtue of the modeling layer in Ceres being block oriented,
    // all the matrices used by Ceres are also block oriented. When
    // doing sparse direct factorization of these matrices (for
    // SPARSE_NORMAL_CHOLESKY, SPARSE_SCHUR and ITERATIVE in
    // conjunction with CLUSTER_TRIDIAGONAL AND CLUSTER_JACOBI
    // preconditioners), the fill-reducing ordering algorithms can
    // either be run on the block or the scalar form of these matrices.
    // Running it on the block form exposes more of the super-nodal
    // structure of the matrix to the factorization routines. Setting
    // this parameter to true runs the ordering algorithms in block
    // form. Currently this option only makes sense with
    // sparse_linear_algebra_library = SUITE_SPARSE.
    bool use_block_amd;

    // Minimum number of iterations for which the linear solver should
    // run, even if the convergence criterion is satisfied.
    int linear_solver_min_num_iterations;

    // Maximum number of iterations for which the linear solver should
    // run. If the solver does not converge in less than
    // linear_solver_max_num_iterations, then it returns
    // MAX_ITERATIONS, as its termination type.
    int linear_solver_max_num_iterations;

    // Forcing sequence parameter. The truncated Newton solver uses
    // this number to control the relative accuracy with which the
    // Newton step is computed.
    //
    // This constant is passed to ConjugateGradientsSolver which uses
    // it to terminate the iterations when
    //
    //  (Q_i - Q_{i-1})/Q_i < eta/i
    double eta;

    // Normalize the jacobian using Jacobi scaling before calling
    // the linear least squares solver.
    bool jacobi_scaling;

    // Logging options ---------------------------------------------------------

    LoggingType logging_type;

    // By default the Minimizer progress is logged to VLOG(1), which
    // is sent to STDERR depending on the vlog level. If this flag is
    // set to true, and logging_type is not SILENT, the logging output
    // is sent to STDOUT.
    bool minimizer_progress_to_stdout;

    // Flags controlling which diagnostic quantities (residuals,
    // gradient, jacobian) are recorded in Solver::Summary before and
    // after the optimization. All default to false.
    bool return_initial_residuals;
    bool return_initial_gradient;
    bool return_initial_jacobian;

    bool return_final_residuals;
    bool return_final_gradient;
    bool return_final_jacobian;

    // List of iterations at which the optimizer should dump the
    // linear least squares problem to disk. Useful for testing and
    // benchmarking. If empty (default), no problems are dumped.
    //
    // This is ignored if protocol buffers are disabled.
    vector<int> lsqp_iterations_to_dump;
    string lsqp_dump_directory;
    DumpFormatType lsqp_dump_format_type;

    // Finite differences options ----------------------------------------------

    // Check all jacobians computed by each residual block with finite
    // differences. This is expensive since it involves computing the
    // derivative by normal means (e.g. user specified, autodiff,
    // etc), then also computing it using finite differences. The
    // results are compared, and if they differ substantially, details
    // are printed to the log.
    bool check_gradients;

    // Relative precision to check for in the gradient checker. If the
    // relative difference between an element in a jacobian exceeds
    // this number, then the jacobian for that cost term is dumped.
    double gradient_check_relative_precision;

    // Relative shift used for taking numeric derivatives. For finite
    // differencing, each dimension is evaluated at slightly shifted
    // values; for the case of central difference, this is what gets
    // evaluated:
    //
    //   delta = numeric_derivative_relative_step_size;
    //   f_initial  = f(x)
    //   f_forward  = f((1 + delta) * x)
    //   f_backward = f((1 - delta) * x)
    //
    // The finite differencing is done along each dimension. The
    // reason to use a relative (rather than absolute) step size is
    // that this way, numeric differentiation works for functions where
    // the arguments are typically large (e.g. 1e9) and when the
    // values are small (e.g. 1e-5). It is possible to construct
    // "torture cases" which break this finite difference heuristic,
    // but they do not come up often in practice.
    //
    // TODO(keir): Pick a smarter number than the default above! In
    // theory a good choice is sqrt(eps) * x, which for doubles means
    // about 1e-8 * x. However, I have found this number too
    // optimistic. This number should be exposed for users to change.
    double numeric_derivative_relative_step_size;

    // If true, the user's parameter blocks are updated at the end of
    // every Minimizer iteration, otherwise they are updated when the
    // Minimizer terminates. This is useful if, for example, the user
    // wishes to visualize the state of the optimization every
    // iteration.
    bool update_state_every_iteration;

    // Callbacks that are executed at the end of each iteration of the
    // Minimizer. An iteration may terminate midway, either due to
    // numerical failures or because one of the convergence tests has
    // been satisfied. In this case none of the callbacks are
    // executed.
    //
    // Callbacks are executed in the order that they are specified in
    // this vector. By default, parameter blocks are updated only at
    // the end of the optimization, i.e when the Minimizer
    // terminates. This behaviour is controlled by
    // update_state_every_variable. If the user wishes to have access
    // to the update parameter blocks when his/her callbacks are
    // executed, then set update_state_every_iteration to true.
    //
    // The solver does NOT take ownership of these pointers.
    vector<IterationCallback*> callbacks;

    // If non-empty, a summary of the execution of the solver is
    // recorded to this file.
    string solver_log;
  };

  // Summary of the state of the solver after termination: why it
  // stopped, how the cost evolved, timing, and the sizes of the
  // problem before and after preprocessing.
  struct Summary {
    Summary();

    // A brief one line description of the state of the solver after
    // termination.
    string BriefReport() const;

    // A full multiline description of the state of the solver after
    // termination.
    string FullReport() const;

    // Minimizer summary -------------------------------------------------
    SolverTerminationType termination_type;

    // If the solver did not run, or there was a failure, a
    // description of the error.
    string error;

    // Cost of the problem before and after the optimization. See
    // problem.h for definition of the cost of a problem.
    double initial_cost;
    double final_cost;

    // The part of the total cost that comes from residual blocks that
    // were held fixed by the preprocessor because all the parameter
    // blocks that they depend on were fixed.
    double fixed_cost;

    // Vectors of residuals before and after the optimization. The
    // entries of these vectors are in the order in which
    // ResidualBlocks were added to the Problem object.
    //
    // Whether the residual vectors are populated with values is
    // controlled by Solver::Options::return_initial_residuals and
    // Solver::Options::return_final_residuals respectively.
    vector<double> initial_residuals;
    vector<double> final_residuals;

    // Gradient vectors, before and after the optimization.  The rows
    // are in the same order in which the ParameterBlocks were added
    // to the Problem object.
    //
    // NOTE: Since AddResidualBlock adds ParameterBlocks to the
    // Problem automatically if they do not already exist, if you wish
    // to have explicit control over the ordering of the vectors, then
    // use Problem::AddParameterBlock to explicitly add the
    // ParameterBlocks in the order desired.
    //
    // Whether the vectors are populated with values is controlled by
    // Solver::Options::return_initial_gradient and
    // Solver::Options::return_final_gradient respectively.
    vector<double> initial_gradient;
    vector<double> final_gradient;

    // Jacobian matrices before and after the optimization. The rows
    // of these matrices are in the same order in which the
    // ResidualBlocks were added to the Problem object. The columns
    // are in the same order in which the ParameterBlocks were added
    // to the Problem object.
    //
    // NOTE: Since AddResidualBlock adds ParameterBlocks to the
    // Problem automatically if they do not already exist, if you wish
    // to have explicit control over the column ordering of the
    // matrix, then use Problem::AddParameterBlock to explicitly add
    // the ParameterBlocks in the order desired.
    //
    // The Jacobian matrices are stored as compressed row sparse
    // matrices. Please see ceres/crs_matrix.h for more details of the
    // format.
    //
    // Whether the Jacobian matrices are populated with values is
    // controlled by Solver::Options::return_initial_jacobian and
    // Solver::Options::return_final_jacobian respectively.
    CRSMatrix initial_jacobian;
    CRSMatrix final_jacobian;

    // Per-iteration records produced by the minimizer.
    vector<IterationSummary> iterations;

    // Step counts; presumably accepted vs. rejected trust region
    // steps — see the trust region documentation above to confirm.
    int num_successful_steps;
    int num_unsuccessful_steps;

    // When the user calls Solve, before the actual optimization
    // occurs, Ceres performs a number of preprocessing steps. These
    // include error checks, memory allocations, and reorderings. This
    // time is accounted for as preprocessing time.
    double preprocessor_time_in_seconds;

    // Time spent in the TrustRegionMinimizer.
    double minimizer_time_in_seconds;

    // After the Minimizer is finished, some time is spent in
    // re-evaluating residuals etc. This time is accounted for in the
    // postprocessor time.
    double postprocessor_time_in_seconds;

    // Sum total of all time spent inside Ceres when Solve is called.
    double total_time_in_seconds;

    // Preprocessor summary: problem sizes as given by the user.
    int num_parameter_blocks;
    int num_parameters;
    int num_residual_blocks;
    int num_residuals;

    // Problem sizes after the preprocessor removed fixed/unused
    // quantities (the "reduced" problem actually optimized).
    int num_parameter_blocks_reduced;
    int num_parameters_reduced;
    int num_residual_blocks_reduced;
    int num_residuals_reduced;

    // For each of the following pairs, "given" is the value the user
    // requested in Solver::Options and "used" is what the solver
    // actually employed after preprocessing.
    int num_eliminate_blocks_given;
    int num_eliminate_blocks_used;

    int num_threads_given;
    int num_threads_used;

    int num_linear_solver_threads_given;
    int num_linear_solver_threads_used;

    LinearSolverType linear_solver_type_given;
    LinearSolverType linear_solver_type_used;

    // Copies of the corresponding Solver::Options settings, recorded
    // for reporting.
    PreconditionerType preconditioner_type;
    OrderingType ordering_type;

    TrustRegionStrategyType trust_region_strategy_type;
    SparseLinearAlgebraLibraryType sparse_linear_algebra_library;
  };

  // Once a least squares problem has been built, this function takes
  // the problem and optimizes it based on the values of the options
  // parameters. Upon return, a detailed summary of the work performed
  // by the preprocessor, the non-linear minimizer and the linear
  // solver are reported in the summary object.
  virtual void Solve(const Options& options,
                     Problem* problem,
                     Solver::Summary* summary);
};
526
// Helper function which avoids going through the interface; solves
// the problem with the given options and fills in the summary.
void Solve(const Solver::Options& options,
           Problem* problem,
           Solver::Summary* summary);
531
532} // namespace ceres
533
534#endif // CERES_PUBLIC_SOLVER_H_