fix formatting for (non-generated) internal source files

- Change the formatting standard to Cpp11. The main difference is that
  no space is inserted between two closing '>' brackets of nested
  templates. We don't choose c++14 because older versions of
  clang-format (version 9 and earlier) don't recognize that value yet,
  and choosing it would make no difference to the resulting formatting.
- Apply clang-format to all (non-generated) internal source files.
- Manually fix some code sections (using clang-format on/off markers)
  and C-string literals.
- Exclude some embedded external files with very different formatting
  (gtest/gmock)
- Add script to format all source files

Change-Id: Ic6cea41575ad6e37c9e136dbce176b0d505dc44d
diff --git a/.clang-format b/.clang-format
index e28c3fd..21cad9e 100644
--- a/.clang-format
+++ b/.clang-format
@@ -1,4 +1,5 @@
 BasedOnStyle: Google
+Standard: Cpp11
 BinPackArguments: false
 BinPackParameters: false
 PointerAlignment: Left
diff --git a/examples/bal_problem.h b/examples/bal_problem.h
index 81e8844..e6d4ace 100644
--- a/examples/bal_problem.h
+++ b/examples/bal_problem.h
@@ -65,18 +65,20 @@
                const double translation_sigma,
                const double point_sigma);
 
-  int camera_block_size() const { return use_quaternions_ ? 10 : 9; }
-  int point_block_size() const { return 3; }
-  int num_cameras() const { return num_cameras_; }
-  int num_points() const { return num_points_; }
-  int num_observations() const { return num_observations_; }
-  int num_parameters() const { return num_parameters_; }
-  const int* point_index() const { return point_index_; }
-  const int* camera_index() const { return camera_index_; }
-  const double* observations() const { return observations_; }
-  const double* parameters() const { return parameters_; }
-  const double* cameras() const { return parameters_; }
-  double* mutable_cameras() { return parameters_; }
+  // clang-format off
+  int camera_block_size()      const { return use_quaternions_ ? 10 : 9; }
+  int point_block_size()       const { return 3;                         }
+  int num_cameras()            const { return num_cameras_;              }
+  int num_points()             const { return num_points_;               }
+  int num_observations()       const { return num_observations_;         }
+  int num_parameters()         const { return num_parameters_;           }
+  const int* point_index()     const { return point_index_;              }
+  const int* camera_index()    const { return camera_index_;             }
+  const double* observations() const { return observations_;             }
+  const double* parameters()   const { return parameters_;               }
+  const double* cameras()      const { return parameters_;               }
+  double* mutable_cameras()          { return parameters_;               }
+  // clang-format on
   double* mutable_points() {
     return parameters_ + camera_block_size() * num_cameras_;
   }
diff --git a/examples/fields_of_experts.h b/examples/fields_of_experts.h
index d7f1a4a..429881d 100644
--- a/examples/fields_of_experts.h
+++ b/examples/fields_of_experts.h
@@ -125,7 +125,7 @@
   // The coefficients in front of each term.
   std::vector<double> alpha_;
   // The filters used for the dot product with image patches.
-  std::vector<std::vector<double> > filters_;
+  std::vector<std::vector<double>> filters_;
 };
 
 }  // namespace examples
diff --git a/examples/nist.cc b/examples/nist.cc
index 937d6bd..977b69d 100644
--- a/examples/nist.cc
+++ b/examples/nist.cc
@@ -167,7 +167,7 @@
 void SplitStringUsingChar(const string& full,
                           const char delim,
                           vector<string>* result) {
-  std::back_insert_iterator<vector<string> > it(*result);
+  std::back_insert_iterator<vector<string>> it(*result);
 
   const char* p = full.data();
   const char* end = p + full.size();
@@ -606,7 +606,7 @@
       ceres::TinySolverCostFunctionAdapter<Eigen::Dynamic, num_parameters> cfa(
           *cost_function);
       typedef ceres::TinySolver<
-          ceres::TinySolverCostFunctionAdapter<Eigen::Dynamic, num_parameters> >
+          ceres::TinySolverCostFunctionAdapter<Eigen::Dynamic, num_parameters>>
           Solver;
       Solver solver;
       solver.options.max_num_iterations = FLAGS_num_iterations;
diff --git a/examples/sampled_function/sampled_function.cc b/examples/sampled_function/sampled_function.cc
index 7dec42b..e96018d 100644
--- a/examples/sampled_function/sampled_function.cc
+++ b/examples/sampled_function/sampled_function.cc
@@ -47,7 +47,7 @@
 // values with automatic differentiation.
 struct InterpolatedCostFunctor {
   explicit InterpolatedCostFunctor(
-      const CubicInterpolator<Grid1D<double> >& interpolator)
+      const CubicInterpolator<Grid1D<double>>& interpolator)
       : interpolator_(interpolator) {}
 
   template <typename T>
@@ -57,13 +57,13 @@
   }
 
   static CostFunction* Create(
-      const CubicInterpolator<Grid1D<double> >& interpolator) {
+      const CubicInterpolator<Grid1D<double>>& interpolator) {
     return new AutoDiffCostFunction<InterpolatedCostFunctor, 1, 1>(
         new InterpolatedCostFunctor(interpolator));
   }
 
  private:
-  const CubicInterpolator<Grid1D<double> >& interpolator_;
+  const CubicInterpolator<Grid1D<double>>& interpolator_;
 };
 
 int main(int argc, char** argv) {
@@ -77,7 +77,7 @@
   }
 
   Grid1D<double> array(values, 0, kNumSamples);
-  CubicInterpolator<Grid1D<double> > interpolator(array);
+  CubicInterpolator<Grid1D<double>> interpolator(array);
 
   double x = 1.0;
   Problem problem;
diff --git a/examples/slam/pose_graph_2d/pose_graph_2d_error_term.h b/examples/slam/pose_graph_2d/pose_graph_2d_error_term.h
index 5d404e3..2df31f6 100644
--- a/examples/slam/pose_graph_2d/pose_graph_2d_error_term.h
+++ b/examples/slam/pose_graph_2d/pose_graph_2d_error_term.h
@@ -78,7 +78,7 @@
     const Eigen::Matrix<T, 2, 1> p_a(*x_a, *y_a);
     const Eigen::Matrix<T, 2, 1> p_b(*x_b, *y_b);
 
-    Eigen::Map<Eigen::Matrix<T, 3, 1> > residuals_map(residuals_ptr);
+    Eigen::Map<Eigen::Matrix<T, 3, 1>> residuals_map(residuals_ptr);
 
     residuals_map.template head<2>() =
         RotationMatrix2D(*yaw_a).transpose() * (p_b - p_a) - p_ab_.cast<T>();
diff --git a/examples/slam/pose_graph_3d/pose_graph_3d.cc b/examples/slam/pose_graph_3d/pose_graph_3d.cc
index c54412d..2f8d6a4 100644
--- a/examples/slam/pose_graph_3d/pose_graph_3d.cc
+++ b/examples/slam/pose_graph_3d/pose_graph_3d.cc
@@ -133,14 +133,14 @@
   for (std::map<int,
                 Pose3d,
                 std::less<int>,
-                Eigen::aligned_allocator<std::pair<const int, Pose3d> > >::
+                Eigen::aligned_allocator<std::pair<const int, Pose3d>>>::
            const_iterator poses_iter = poses.begin();
        poses_iter != poses.end();
        ++poses_iter) {
     const std::map<int,
                    Pose3d,
                    std::less<int>,
-                   Eigen::aligned_allocator<std::pair<const int, Pose3d> > >::
+                   Eigen::aligned_allocator<std::pair<const int, Pose3d>>>::
         value_type& pair = *poses_iter;
     outfile << pair.first << " " << pair.second.p.transpose() << " "
             << pair.second.q.x() << " " << pair.second.q.y() << " "
diff --git a/examples/slam/pose_graph_3d/pose_graph_3d_error_term.h b/examples/slam/pose_graph_3d/pose_graph_3d_error_term.h
index c8def17..1f3e8de 100644
--- a/examples/slam/pose_graph_3d/pose_graph_3d_error_term.h
+++ b/examples/slam/pose_graph_3d/pose_graph_3d_error_term.h
@@ -79,11 +79,11 @@
                   const T* const p_b_ptr,
                   const T* const q_b_ptr,
                   T* residuals_ptr) const {
-    Eigen::Map<const Eigen::Matrix<T, 3, 1> > p_a(p_a_ptr);
-    Eigen::Map<const Eigen::Quaternion<T> > q_a(q_a_ptr);
+    Eigen::Map<const Eigen::Matrix<T, 3, 1>> p_a(p_a_ptr);
+    Eigen::Map<const Eigen::Quaternion<T>> q_a(q_a_ptr);
 
-    Eigen::Map<const Eigen::Matrix<T, 3, 1> > p_b(p_b_ptr);
-    Eigen::Map<const Eigen::Quaternion<T> > q_b(q_b_ptr);
+    Eigen::Map<const Eigen::Matrix<T, 3, 1>> p_b(p_b_ptr);
+    Eigen::Map<const Eigen::Quaternion<T>> q_b(q_b_ptr);
 
     // Compute the relative transformation between the two frames.
     Eigen::Quaternion<T> q_a_inverse = q_a.conjugate();
@@ -99,7 +99,7 @@
     // Compute the residuals.
     // [ position         ]   [ delta_p          ]
     // [ orientation (3x1)] = [ 2 * delta_q(0:2) ]
-    Eigen::Map<Eigen::Matrix<T, 6, 1> > residuals(residuals_ptr);
+    Eigen::Map<Eigen::Matrix<T, 6, 1>> residuals(residuals_ptr);
     residuals.template block<3, 1>(0, 0) =
         p_ab_estimated - t_ab_measured_.p.template cast<T>();
     residuals.template block<3, 1>(3, 0) = T(2.0) * delta_q.vec();
diff --git a/examples/slam/pose_graph_3d/types.h b/examples/slam/pose_graph_3d/types.h
index 25f7ba2..d3f19ed 100644
--- a/examples/slam/pose_graph_3d/types.h
+++ b/examples/slam/pose_graph_3d/types.h
@@ -64,7 +64,7 @@
 typedef std::map<int,
                  Pose3d,
                  std::less<int>,
-                 Eigen::aligned_allocator<std::pair<const int, Pose3d> > >
+                 Eigen::aligned_allocator<std::pair<const int, Pose3d>>>
     MapOfPoses;
 
 // The constraint between two vertices in the pose graph. The constraint is the
@@ -103,7 +103,7 @@
   return input;
 }
 
-typedef std::vector<Constraint3d, Eigen::aligned_allocator<Constraint3d> >
+typedef std::vector<Constraint3d, Eigen::aligned_allocator<Constraint3d>>
     VectorOfConstraints;
 
 }  // namespace examples
diff --git a/internal/ceres/accelerate_sparse.cc b/internal/ceres/accelerate_sparse.cc
index eb04e71..d2b642b 100644
--- a/internal/ceres/accelerate_sparse.cc
+++ b/internal/ceres/accelerate_sparse.cc
@@ -33,18 +33,19 @@
 
 #ifndef CERES_NO_ACCELERATE_SPARSE
 
-#include "ceres/accelerate_sparse.h"
-
 #include <algorithm>
 #include <string>
 #include <vector>
 
+#include "ceres/accelerate_sparse.h"
 #include "ceres/compressed_col_sparse_matrix_utils.h"
 #include "ceres/compressed_row_sparse_matrix.h"
 #include "ceres/triplet_sparse_matrix.h"
 #include "glog/logging.h"
 
-#define CASESTR(x) case x: return #x
+#define CASESTR(x) \
+  case x:          \
+    return #x
 
 namespace ceres {
 namespace internal {
@@ -68,7 +69,7 @@
 // aligned to kAccelerateRequiredAlignment and returns a pointer to the
 // aligned start.
 void* ResizeForAccelerateAlignment(const size_t required_size,
-                                   std::vector<uint8_t> *workspace) {
+                                   std::vector<uint8_t>* workspace) {
   // As per the Accelerate documentation, all workspace memory passed to the
   // sparse solver functions must be 16-byte aligned.
   constexpr int kAccelerateRequiredAlignment = 16;
@@ -80,29 +81,28 @@
   size_t size_from_aligned_start = workspace->size();
   void* aligned_solve_workspace_start =
       reinterpret_cast<void*>(workspace->data());
-  aligned_solve_workspace_start =
-      std::align(kAccelerateRequiredAlignment,
-                 required_size,
-                 aligned_solve_workspace_start,
-                 size_from_aligned_start);
+  aligned_solve_workspace_start = std::align(kAccelerateRequiredAlignment,
+                                             required_size,
+                                             aligned_solve_workspace_start,
+                                             size_from_aligned_start);
   CHECK(aligned_solve_workspace_start != nullptr)
       << "required_size: " << required_size
       << ", workspace size: " << workspace->size();
   return aligned_solve_workspace_start;
 }
 
-template<typename Scalar>
+template <typename Scalar>
 void AccelerateSparse<Scalar>::Solve(NumericFactorization* numeric_factor,
                                      DenseVector* rhs_and_solution) {
   // From SparseSolve() documentation in Solve.h
-  const int required_size =
-      numeric_factor->solveWorkspaceRequiredStatic +
-      numeric_factor->solveWorkspaceRequiredPerRHS;
-  SparseSolve(*numeric_factor, *rhs_and_solution,
+  const int required_size = numeric_factor->solveWorkspaceRequiredStatic +
+                            numeric_factor->solveWorkspaceRequiredPerRHS;
+  SparseSolve(*numeric_factor,
+              *rhs_and_solution,
               ResizeForAccelerateAlignment(required_size, &solve_workspace_));
 }
 
-template<typename Scalar>
+template <typename Scalar>
 typename AccelerateSparse<Scalar>::ASSparseMatrix
 AccelerateSparse<Scalar>::CreateSparseMatrixTransposeView(
     CompressedRowSparseMatrix* A) {
@@ -112,7 +112,7 @@
   //
   // Accelerate's columnStarts is a long*, not an int*.  These types might be
   // different (e.g. ARM on iOS) so always make a copy.
-  column_starts_.resize(A->num_rows() +1); // +1 for final column length.
+  column_starts_.resize(A->num_rows() + 1);  // +1 for final column length.
   std::copy_n(A->rows(), column_starts_.size(), &column_starts_[0]);
 
   ASSparseMatrix At;
@@ -136,29 +136,31 @@
   return At;
 }
 
-template<typename Scalar>
+template <typename Scalar>
 typename AccelerateSparse<Scalar>::SymbolicFactorization
 AccelerateSparse<Scalar>::AnalyzeCholesky(ASSparseMatrix* A) {
   return SparseFactor(SparseFactorizationCholesky, A->structure);
 }
 
-template<typename Scalar>
+template <typename Scalar>
 typename AccelerateSparse<Scalar>::NumericFactorization
 AccelerateSparse<Scalar>::Cholesky(ASSparseMatrix* A,
                                    SymbolicFactorization* symbolic_factor) {
   return SparseFactor(*symbolic_factor, *A);
 }
 
-template<typename Scalar>
+template <typename Scalar>
 void AccelerateSparse<Scalar>::Cholesky(ASSparseMatrix* A,
                                         NumericFactorization* numeric_factor) {
   // From SparseRefactor() documentation in Solve.h
-  const int required_size = std::is_same<Scalar, double>::value
-      ? numeric_factor->symbolicFactorization.workspaceSize_Double
-      : numeric_factor->symbolicFactorization.workspaceSize_Float;
-  return SparseRefactor(*A, numeric_factor,
-                        ResizeForAccelerateAlignment(required_size,
-                                                     &factorization_workspace_));
+  const int required_size =
+      std::is_same<Scalar, double>::value
+          ? numeric_factor->symbolicFactorization.workspaceSize_Double
+          : numeric_factor->symbolicFactorization.workspaceSize_Float;
+  return SparseRefactor(
+      *A,
+      numeric_factor,
+      ResizeForAccelerateAlignment(required_size, &factorization_workspace_));
 }
 
 // Instantiate only for the specific template types required/supported s/t the
@@ -166,34 +168,33 @@
 template class AccelerateSparse<double>;
 template class AccelerateSparse<float>;
 
-template<typename Scalar>
-std::unique_ptr<SparseCholesky>
-AppleAccelerateCholesky<Scalar>::Create(OrderingType ordering_type) {
+template <typename Scalar>
+std::unique_ptr<SparseCholesky> AppleAccelerateCholesky<Scalar>::Create(
+    OrderingType ordering_type) {
   return std::unique_ptr<SparseCholesky>(
       new AppleAccelerateCholesky<Scalar>(ordering_type));
 }
 
-template<typename Scalar>
+template <typename Scalar>
 AppleAccelerateCholesky<Scalar>::AppleAccelerateCholesky(
     const OrderingType ordering_type)
     : ordering_type_(ordering_type) {}
 
-template<typename Scalar>
+template <typename Scalar>
 AppleAccelerateCholesky<Scalar>::~AppleAccelerateCholesky() {
   FreeSymbolicFactorization();
   FreeNumericFactorization();
 }
 
-template<typename Scalar>
+template <typename Scalar>
 CompressedRowSparseMatrix::StorageType
 AppleAccelerateCholesky<Scalar>::StorageType() const {
   return CompressedRowSparseMatrix::LOWER_TRIANGULAR;
 }
 
-template<typename Scalar>
-LinearSolverTerminationType
-AppleAccelerateCholesky<Scalar>::Factorize(CompressedRowSparseMatrix* lhs,
-                                           std::string* message) {
+template <typename Scalar>
+LinearSolverTerminationType AppleAccelerateCholesky<Scalar>::Factorize(
+    CompressedRowSparseMatrix* lhs, std::string* message) {
   CHECK_EQ(lhs->storage_type(), StorageType());
   if (lhs == NULL) {
     *message = "Failure: Input lhs is NULL.";
@@ -234,11 +235,9 @@
   return LINEAR_SOLVER_SUCCESS;
 }
 
-template<typename Scalar>
-LinearSolverTerminationType
-AppleAccelerateCholesky<Scalar>::Solve(const double* rhs,
-                                       double* solution,
-                                       std::string* message) {
+template <typename Scalar>
+LinearSolverTerminationType AppleAccelerateCholesky<Scalar>::Solve(
+    const double* rhs, double* solution, std::string* message) {
   CHECK_EQ(numeric_factor_->status, SparseStatusOK)
       << "Solve called without a call to Factorize first ("
       << SparseStatusToString(numeric_factor_->status) << ").";
@@ -262,7 +261,7 @@
   return LINEAR_SOLVER_SUCCESS;
 }
 
-template<typename Scalar>
+template <typename Scalar>
 void AppleAccelerateCholesky<Scalar>::FreeSymbolicFactorization() {
   if (symbolic_factor_) {
     SparseCleanup(*symbolic_factor_);
@@ -270,7 +269,7 @@
   }
 }
 
-template<typename Scalar>
+template <typename Scalar>
 void AppleAccelerateCholesky<Scalar>::FreeNumericFactorization() {
   if (numeric_factor_) {
     SparseCleanup(*numeric_factor_);
@@ -283,7 +282,7 @@
 template class AppleAccelerateCholesky<double>;
 template class AppleAccelerateCholesky<float>;
 
-}
-}
+}  // namespace internal
+}  // namespace ceres
 
 #endif  // CERES_NO_ACCELERATE_SPARSE
diff --git a/internal/ceres/accelerate_sparse.h b/internal/ceres/accelerate_sparse.h
index 43b4ea5..e53758d 100644
--- a/internal/ceres/accelerate_sparse.h
+++ b/internal/ceres/accelerate_sparse.h
@@ -40,9 +40,9 @@
 #include <string>
 #include <vector>
 
+#include "Accelerate.h"
 #include "ceres/linear_solver.h"
 #include "ceres/sparse_cholesky.h"
-#include "Accelerate.h"
 
 namespace ceres {
 namespace internal {
@@ -50,11 +50,10 @@
 class CompressedRowSparseMatrix;
 class TripletSparseMatrix;
 
-template<typename Scalar>
-struct SparseTypesTrait {
-};
+template <typename Scalar>
+struct SparseTypesTrait {};
 
-template<>
+template <>
 struct SparseTypesTrait<double> {
   typedef DenseVector_Double DenseVector;
   typedef SparseMatrix_Double SparseMatrix;
@@ -62,7 +61,7 @@
   typedef SparseOpaqueFactorization_Double NumericFactorization;
 };
 
-template<>
+template <>
 struct SparseTypesTrait<float> {
   typedef DenseVector_Float DenseVector;
   typedef SparseMatrix_Float SparseMatrix;
@@ -70,14 +69,16 @@
   typedef SparseOpaqueFactorization_Float NumericFactorization;
 };
 
-template<typename Scalar>
+template <typename Scalar>
 class AccelerateSparse {
  public:
   using DenseVector = typename SparseTypesTrait<Scalar>::DenseVector;
   // Use ASSparseMatrix to avoid collision with ceres::internal::SparseMatrix.
   using ASSparseMatrix = typename SparseTypesTrait<Scalar>::SparseMatrix;
-  using SymbolicFactorization = typename SparseTypesTrait<Scalar>::SymbolicFactorization;
-  using NumericFactorization = typename SparseTypesTrait<Scalar>::NumericFactorization;
+  using SymbolicFactorization =
+      typename SparseTypesTrait<Scalar>::SymbolicFactorization;
+  using NumericFactorization =
+      typename SparseTypesTrait<Scalar>::NumericFactorization;
 
   // Solves a linear system given its symbolic (reference counted within
   // NumericFactorization) and numeric factorization.
@@ -109,7 +110,7 @@
 
 // An implementation of SparseCholesky interface using Apple's Accelerate
 // framework.
-template<typename Scalar>
+template <typename Scalar>
 class AppleAccelerateCholesky : public SparseCholesky {
  public:
   // Factory
@@ -122,7 +123,7 @@
                                         std::string* message) final;
   LinearSolverTerminationType Solve(const double* rhs,
                                     double* solution,
-                                    std::string* message) final ;
+                                    std::string* message) final;
 
  private:
   AppleAccelerateCholesky(const OrderingType ordering_type);
@@ -132,15 +133,15 @@
   const OrderingType ordering_type_;
   AccelerateSparse<Scalar> as_;
   std::unique_ptr<typename AccelerateSparse<Scalar>::SymbolicFactorization>
-  symbolic_factor_;
+      symbolic_factor_;
   std::unique_ptr<typename AccelerateSparse<Scalar>::NumericFactorization>
-  numeric_factor_;
+      numeric_factor_;
   // Copy of rhs/solution if Scalar != double (necessitating a copy).
   Eigen::Matrix<Scalar, Eigen::Dynamic, 1> scalar_rhs_and_solution_;
 };
 
-}
-}
+}  // namespace internal
+}  // namespace ceres
 
 #endif  // CERES_NO_ACCELERATE_SPARSE
 
diff --git a/internal/ceres/array_utils.cc b/internal/ceres/array_utils.cc
index 32459e6..6bffd84 100644
--- a/internal/ceres/array_utils.cc
+++ b/internal/ceres/array_utils.cc
@@ -35,6 +35,7 @@
 #include <cstddef>
 #include <string>
 #include <vector>
+
 #include "ceres/stringprintf.h"
 #include "ceres/types.h"
 namespace ceres {
@@ -45,7 +46,7 @@
 bool IsArrayValid(const int size, const double* x) {
   if (x != NULL) {
     for (int i = 0; i < size; ++i) {
-      if (!std::isfinite(x[i]) || (x[i] == kImpossibleValue))  {
+      if (!std::isfinite(x[i]) || (x[i] == kImpossibleValue)) {
         return false;
       }
     }
@@ -59,7 +60,7 @@
   }
 
   for (int i = 0; i < size; ++i) {
-    if (!std::isfinite(x[i]) || (x[i] == kImpossibleValue))  {
+    if (!std::isfinite(x[i]) || (x[i] == kImpossibleValue)) {
       return i;
     }
   }
@@ -92,14 +93,13 @@
 void MapValuesToContiguousRange(const int size, int* array) {
   std::vector<int> unique_values(array, array + size);
   std::sort(unique_values.begin(), unique_values.end());
-  unique_values.erase(std::unique(unique_values.begin(),
-                                  unique_values.end()),
+  unique_values.erase(std::unique(unique_values.begin(), unique_values.end()),
                       unique_values.end());
 
   for (int i = 0; i < size; ++i) {
-    array[i] = std::lower_bound(unique_values.begin(),
-                                unique_values.end(),
-                                array[i]) - unique_values.begin();
+    array[i] =
+        std::lower_bound(unique_values.begin(), unique_values.end(), array[i]) -
+        unique_values.begin();
   }
 }
 
diff --git a/internal/ceres/array_utils.h b/internal/ceres/array_utils.h
index 1d55733..b068721 100644
--- a/internal/ceres/array_utils.h
+++ b/internal/ceres/array_utils.h
@@ -44,6 +44,7 @@
 #define CERES_INTERNAL_ARRAY_UTILS_H_
 
 #include <string>
+
 #include "ceres/internal/port.h"
 
 namespace ceres {
diff --git a/internal/ceres/array_utils_test.cc b/internal/ceres/array_utils_test.cc
index 77379d9..6c0ea84 100644
--- a/internal/ceres/array_utils_test.cc
+++ b/internal/ceres/array_utils_test.cc
@@ -30,9 +30,10 @@
 
 #include "ceres/array_utils.h"
 
-#include <limits>
 #include <cmath>
+#include <limits>
 #include <vector>
+
 #include "gtest/gtest.h"
 
 namespace ceres {
diff --git a/internal/ceres/autodiff_benchmarks/autodiff_benchmarks.cc b/internal/ceres/autodiff_benchmarks/autodiff_benchmarks.cc
index 2533ba6..87e0067 100644
--- a/internal/ceres/autodiff_benchmarks/autodiff_benchmarks.cc
+++ b/internal/ceres/autodiff_benchmarks/autodiff_benchmarks.cc
@@ -55,13 +55,14 @@
 
   template <typename T>
   bool operator()(const T* const* parameters, T* residuals) const {
-    return Apply(parameters, residuals,
-                 std::make_index_sequence<kNumParameterBlocks>());
+    return Apply(
+        parameters, residuals, std::make_index_sequence<kNumParameterBlocks>());
   }
 
  private:
   template <typename T, size_t... Indices>
-  bool Apply(const T* const* parameters, T* residuals,
+  bool Apply(const T* const* parameters,
+             T* residuals,
              std::index_sequence<Indices...>) const {
     return cost_function_(parameters[Indices]..., residuals);
   }
@@ -109,7 +110,9 @@
 
 template <>
 struct CostFunctionFactory<kNotDynamic> {
-  template <typename CostFunctor, int kNumResiduals, int... Ns,
+  template <typename CostFunctor,
+            int kNumResiduals,
+            int... Ns,
             typename... Args>
   static std::unique_ptr<ceres::CostFunction> Create(Args&&... args) {
     return std::make_unique<
@@ -120,7 +123,9 @@
 
 template <>
 struct CostFunctionFactory<kDynamic> {
-  template <typename CostFunctor, int kNumResiduals, int... Ns,
+  template <typename CostFunctor,
+            int kNumResiduals,
+            int... Ns,
             typename... Args>
   static std::unique_ptr<ceres::CostFunction> Create(Args&&... args) {
     constexpr const int kNumParameterBlocks = sizeof...(Ns);
@@ -147,8 +152,8 @@
   double* jacobians[] = {jacobian_values.data()};
 
   std::unique_ptr<ceres::CostFunction> cost_function =
-      CostFunctionFactory<kIsDynamic>::template Create<
-          ConstantCostFunction<kParameterBlockSize>, 1, 1>();
+      CostFunctionFactory<kIsDynamic>::
+          template Create<ConstantCostFunction<kParameterBlockSize>, 1, 1>();
 
   for (auto _ : state) {
     cost_function->Evaluate(parameters, residuals.data(), jacobians);
@@ -186,13 +191,12 @@
   double residuals[1];
   double* jacobians[] = {jacobian1};
 
-  std::unique_ptr<ceres::CostFunction> cost_function =
-      CostFunctionFactory<kIsDynamic>::template Create<Linear1CostFunction, 1,
-                                                       1>();
+  std::unique_ptr<ceres::CostFunction> cost_function = CostFunctionFactory<
+      kIsDynamic>::template Create<Linear1CostFunction, 1, 1>();
 
   for (auto _ : state) {
-    cost_function->Evaluate(parameters, residuals,
-                            state.range(0) ? jacobians : nullptr);
+    cost_function->Evaluate(
+        parameters, residuals, state.range(0) ? jacobians : nullptr);
   }
 }
 BENCHMARK_TEMPLATE(BM_Linear1AutoDiff, kNotDynamic)->Arg(0)->Arg(1);
@@ -207,13 +211,12 @@
   double residuals[10];
   double* jacobians[] = {jacobian1};
 
-  std::unique_ptr<ceres::CostFunction> cost_function =
-      CostFunctionFactory<kIsDynamic>::template Create<Linear10CostFunction, 10,
-                                                       10>();
+  std::unique_ptr<ceres::CostFunction> cost_function = CostFunctionFactory<
+      kIsDynamic>::template Create<Linear10CostFunction, 10, 10>();
 
   for (auto _ : state) {
-    cost_function->Evaluate(parameters, residuals,
-                            state.range(0) ? jacobians : nullptr);
+    cost_function->Evaluate(
+        parameters, residuals, state.range(0) ? jacobians : nullptr);
   }
 }
 BENCHMARK_TEMPLATE(BM_Linear10AutoDiff, kNotDynamic)->Arg(0)->Arg(1);
@@ -255,8 +258,8 @@
           x, y);
 
   for (auto _ : state) {
-    cost_function->Evaluate(parameters, &residuals,
-                            state.range(0) ? jacobians : nullptr);
+    cost_function->Evaluate(
+        parameters, &residuals, state.range(0) ? jacobians : nullptr);
   }
 }
 BENCHMARK_TEMPLATE(BM_Rat43AutoDiff, kNotDynamic)->Arg(0)->Arg(1);
@@ -275,13 +278,12 @@
 
   const double x = 0.2;
   const double y = 0.3;
-  std::unique_ptr<ceres::CostFunction> cost_function =
-      CostFunctionFactory<kIsDynamic>::template Create<SnavelyReprojectionError,
-                                                       2, 9, 3>(x, y);
+  std::unique_ptr<ceres::CostFunction> cost_function = CostFunctionFactory<
+      kIsDynamic>::template Create<SnavelyReprojectionError, 2, 9, 3>(x, y);
 
   for (auto _ : state) {
-    cost_function->Evaluate(parameters, residuals,
-                            state.range(0) ? jacobians : nullptr);
+    cost_function->Evaluate(
+        parameters, residuals, state.range(0) ? jacobians : nullptr);
   }
 }
 
@@ -338,14 +340,16 @@
   intrinsics << 128, 128, 1, -1, 0.5, 0.5;
 
   std::unique_ptr<ceres::CostFunction> cost_function =
-      CostFunctionFactory<kIsDynamic>::template Create<
-          FunctorType, FunctorType::PATCH_SIZE, FunctorType::POSE_SIZE,
-          FunctorType::POSE_SIZE, FunctorType::POINT_SIZE>(
+      CostFunctionFactory<kIsDynamic>::template Create<FunctorType,
+                                                       FunctorType::PATCH_SIZE,
+                                                       FunctorType::POSE_SIZE,
+                                                       FunctorType::POSE_SIZE,
+                                                       FunctorType::POINT_SIZE>(
           intensities_host, bearings_host, image_target, intrinsics);
 
   for (auto _ : state) {
-    cost_function->Evaluate(parameters, residuals,
-                            state.range(0) ? jacobians : nullptr);
+    cost_function->Evaluate(
+        parameters, residuals, state.range(0) ? jacobians : nullptr);
   }
 }
 
@@ -376,8 +380,8 @@
           q_i_j, t_i_j);
 
   for (auto _ : state) {
-    cost_function->Evaluate(parameters, residuals,
-                            state.range(0) ? jacobians : nullptr);
+    cost_function->Evaluate(
+        parameters, residuals, state.range(0) ? jacobians : nullptr);
   }
 }
 
@@ -396,24 +400,25 @@
   auto x = Eigen::Vector3d(0.5, 0.7, -0.1).normalized();
   auto y = Eigen::Vector3d(0.2, -0.2, -0.2).normalized();
 
-  double* parameters[7] = {material, c.data(), n.data(), v.data(),
-                           l.data(), x.data(), y.data()};
+  double* parameters[7] = {
+      material, c.data(), n.data(), v.data(), l.data(), x.data(), y.data()};
 
   double jacobian[(10 + 6 * 3) * 3];
   double residuals[3];
+  // clang-format off
   double* jacobians[7] = {
       jacobian + 0,      jacobian + 10 * 3, jacobian + 13 * 3,
       jacobian + 16 * 3, jacobian + 19 * 3, jacobian + 22 * 3,
       jacobian + 25 * 3,
   };
+  // clang-format on
 
-  std::unique_ptr<ceres::CostFunction> cost_function =
-      CostFunctionFactory<kIsDynamic>::template Create<FunctorType, 3, 10, 3, 3,
-                                                       3, 3, 3, 3>();
+  std::unique_ptr<ceres::CostFunction> cost_function = CostFunctionFactory<
+      kIsDynamic>::template Create<FunctorType, 3, 10, 3, 3, 3, 3, 3, 3>();
 
   for (auto _ : state) {
-    cost_function->Evaluate(parameters, residuals,
-                            state.range(0) ? jacobians : nullptr);
+    cost_function->Evaluate(
+        parameters, residuals, state.range(0) ? jacobians : nullptr);
   }
 }
 
diff --git a/internal/ceres/autodiff_benchmarks/brdf_cost_function.h b/internal/ceres/autodiff_benchmarks/brdf_cost_function.h
index eba0932..9d7c0cc 100644
--- a/internal/ceres/autodiff_benchmarks/brdf_cost_function.h
+++ b/internal/ceres/autodiff_benchmarks/brdf_cost_function.h
@@ -190,10 +190,10 @@
 
   template <typename T>
   inline T GTR2Aniso(const T& n_dot_h,
-              const T& h_dot_x,
-              const T& h_dot_y,
-              const T& ax,
-              const T& ay) const {
+                     const T& h_dot_x,
+                     const T& h_dot_y,
+                     const T& ax,
+                     const T& ay) const {
     return T(1) / (T(M_PI) * ax * ay *
                    Square(Square(h_dot_x / ax) + Square(h_dot_y / ay) +
                           n_dot_h * n_dot_h));
@@ -205,10 +205,10 @@
   }
 
   template <typename Derived1, typename Derived2>
-  inline typename Derived1::PlainObject
-  Lerp(const Eigen::MatrixBase<Derived1>& a,
-       const Eigen::MatrixBase<Derived2>& b,
-       typename Derived1::Scalar alpha) const {
+  inline typename Derived1::PlainObject Lerp(
+      const Eigen::MatrixBase<Derived1>& a,
+      const Eigen::MatrixBase<Derived2>& b,
+      typename Derived1::Scalar alpha) const {
     return (typename Derived1::Scalar(1) - alpha) * a + alpha * b;
   }
 
diff --git a/internal/ceres/autodiff_cost_function_test.cc b/internal/ceres/autodiff_cost_function_test.cc
index 53461cb..cc340f6 100644
--- a/internal/ceres/autodiff_cost_function_test.cc
+++ b/internal/ceres/autodiff_cost_function_test.cc
@@ -32,30 +32,30 @@
 
 #include <memory>
 
-#include "gtest/gtest.h"
-#include "ceres/cost_function.h"
 #include "ceres/array_utils.h"
+#include "ceres/cost_function.h"
+#include "gtest/gtest.h"
 
 namespace ceres {
 namespace internal {
 
 class BinaryScalarCost {
  public:
-  explicit BinaryScalarCost(double a): a_(a) {}
+  explicit BinaryScalarCost(double a) : a_(a) {}
   template <typename T>
-  bool operator()(const T* const x, const T* const y,
-                  T* cost) const {
-    cost[0] = x[0] * y[0] + x[1] * y[1]  - T(a_);
+  bool operator()(const T* const x, const T* const y, T* cost) const {
+    cost[0] = x[0] * y[0] + x[1] * y[1] - T(a_);
     return true;
   }
+
  private:
   double a_;
 };
 
 TEST(AutodiffCostFunction, BilinearDifferentiationTest) {
-  CostFunction* cost_function  =
-    new AutoDiffCostFunction<BinaryScalarCost, 1, 2, 2>(
-        new BinaryScalarCost(1.0));
+  CostFunction* cost_function =
+      new AutoDiffCostFunction<BinaryScalarCost, 1, 2, 2>(
+          new BinaryScalarCost(1.0));
 
   double** parameters = new double*[2];
   parameters[0] = new double[2];
@@ -112,10 +112,19 @@
 };
 
 TEST(AutodiffCostFunction, ManyParameterAutodiffInstantiates) {
-  CostFunction* cost_function  =
-      new AutoDiffCostFunction<
-          TenParameterCost, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1>(
-              new TenParameterCost);
+  CostFunction* cost_function =
+      new AutoDiffCostFunction<TenParameterCost,
+                               1,
+                               1,
+                               1,
+                               1,
+                               1,
+                               1,
+                               1,
+                               1,
+                               1,
+                               1,
+                               1>(new TenParameterCost);
 
   double** parameters = new double*[10];
   double** jacobians = new double*[10];
diff --git a/internal/ceres/autodiff_local_parameterization_test.cc b/internal/ceres/autodiff_local_parameterization_test.cc
index df7723d..36fd3c9 100644
--- a/internal/ceres/autodiff_local_parameterization_test.cc
+++ b/internal/ceres/autodiff_local_parameterization_test.cc
@@ -28,8 +28,10 @@
 //
 // Author: sameeragarwal@google.com (Sameer Agarwal)
 
-#include <cmath>
 #include "ceres/autodiff_local_parameterization.h"
+
+#include <cmath>
+
 #include "ceres/local_parameterization.h"
 #include "ceres/rotation.h"
 #include "gtest/gtest.h"
@@ -48,8 +50,7 @@
 };
 
 TEST(AutoDiffLocalParameterizationTest, IdentityParameterization) {
-  AutoDiffLocalParameterization<IdentityPlus, 3, 3>
-      parameterization;
+  AutoDiffLocalParameterization<IdentityPlus, 3, 3> parameterization;
 
   double x[3] = {1.0, 2.0, 3.0};
   double delta[3] = {0.0, 1.0, 2.0};
@@ -71,9 +72,8 @@
 }
 
 struct ScaledPlus {
-  explicit ScaledPlus(const double &scale_factor)
-     : scale_factor_(scale_factor)
-  {}
+  explicit ScaledPlus(const double& scale_factor)
+      : scale_factor_(scale_factor) {}
 
   template <typename T>
   bool operator()(const T* x, const T* delta, T* x_plus_delta) const {
@@ -89,8 +89,8 @@
 TEST(AutoDiffLocalParameterizationTest, ScaledParameterization) {
   const double kTolerance = 1e-14;
 
-  AutoDiffLocalParameterization<ScaledPlus, 3, 3>
-      parameterization(new ScaledPlus(1.2345));
+  AutoDiffLocalParameterization<ScaledPlus, 3, 3> parameterization(
+      new ScaledPlus(1.2345));
 
   double x[3] = {1.0, 2.0, 3.0};
   double delta[3] = {0.0, 1.0, 2.0};
@@ -112,7 +112,7 @@
 }
 
 struct QuaternionPlus {
-  template<typename T>
+  template <typename T>
   bool operator()(const T* x, const T* delta, T* x_plus_delta) const {
     const T squared_norm_delta =
         delta[0] * delta[0] + delta[1] * delta[1] + delta[2] * delta[2];
@@ -147,7 +147,6 @@
   double x_plus_delta_ref[4] = {0.0, 0.0, 0.0, 0.0};
   double jacobian_ref[12];
 
-
   QuaternionParameterization ref_parameterization;
   ref_parameterization.Plus(x, delta, x_plus_delta_ref);
   ref_parameterization.ComputeJacobian(x, jacobian_ref);
@@ -162,20 +161,22 @@
     EXPECT_NEAR(x_plus_delta[i], x_plus_delta_ref[i], kTolerance);
   }
 
+  // clang-format off
   const double x_plus_delta_norm =
       sqrt(x_plus_delta[0] * x_plus_delta[0] +
            x_plus_delta[1] * x_plus_delta[1] +
            x_plus_delta[2] * x_plus_delta[2] +
            x_plus_delta[3] * x_plus_delta[3]);
+  // clang-format on
 
   EXPECT_NEAR(x_plus_delta_norm, 1.0, kTolerance);
 
   for (int i = 0; i < 12; ++i) {
     EXPECT_TRUE(std::isfinite(jacobian[i]));
     EXPECT_NEAR(jacobian[i], jacobian_ref[i], kTolerance)
-        << "Jacobian mismatch: i = " << i
-        << "\n Expected \n" << ConstMatrixRef(jacobian_ref, 4, 3)
-        << "\n Actual \n" << ConstMatrixRef(jacobian, 4, 3);
+        << "Jacobian mismatch: i = " << i << "\n Expected \n"
+        << ConstMatrixRef(jacobian_ref, 4, 3) << "\n Actual \n"
+        << ConstMatrixRef(jacobian, 4, 3);
   }
 }
 
@@ -185,13 +186,14 @@
   QuaternionParameterizationTestHelper(x, delta);
 }
 
-
 TEST(AutoDiffLocalParameterization, QuaternionParameterizationNearZeroTest) {
   double x[4] = {0.52, 0.25, 0.15, 0.45};
+  // clang-format off
   double norm_x = sqrt(x[0] * x[0] +
                        x[1] * x[1] +
                        x[2] * x[2] +
                        x[3] * x[3]);
+  // clang-format on
   for (int i = 0; i < 4; ++i) {
     x[i] = x[i] / norm_x;
   }
@@ -206,10 +208,12 @@
 
 TEST(AutoDiffLocalParameterization, QuaternionParameterizationNonZeroTest) {
   double x[4] = {0.52, 0.25, 0.15, 0.45};
+  // clang-format off
   double norm_x = sqrt(x[0] * x[0] +
                        x[1] * x[1] +
                        x[2] * x[2] +
                        x[3] * x[3]);
+  // clang-format on
 
   for (int i = 0; i < 4; ++i) {
     x[i] = x[i] / norm_x;
diff --git a/internal/ceres/blas.cc b/internal/ceres/blas.cc
index 3ba63bb..f8d006e 100644
--- a/internal/ceres/blas.cc
+++ b/internal/ceres/blas.cc
@@ -29,6 +29,7 @@
 // Author: sameeragarwal@google.com (Sameer Agarwal)
 
 #include "ceres/blas.h"
+
 #include "ceres/internal/port.h"
 #include "glog/logging.h"
 
diff --git a/internal/ceres/block_evaluate_preparer.cc b/internal/ceres/block_evaluate_preparer.cc
index 59c0d3e..7db96d9 100644
--- a/internal/ceres/block_evaluate_preparer.cc
+++ b/internal/ceres/block_evaluate_preparer.cc
@@ -31,6 +31,7 @@
 #include "ceres/block_evaluate_preparer.h"
 
 #include <vector>
+
 #include "ceres/block_sparse_matrix.h"
 #include "ceres/casts.h"
 #include "ceres/parameter_block.h"
@@ -53,10 +54,8 @@
                                     double** jacobians) {
   // If the overall jacobian is not available, use the scratch space.
   if (jacobian == NULL) {
-    scratch_evaluate_preparer_.Prepare(residual_block,
-                                       residual_block_index,
-                                       jacobian,
-                                       jacobians);
+    scratch_evaluate_preparer_.Prepare(
+        residual_block, residual_block_index, jacobian, jacobians);
     return;
   }
 
diff --git a/internal/ceres/block_jacobi_preconditioner.cc b/internal/ceres/block_jacobi_preconditioner.cc
index 772c7af..6f37aca 100644
--- a/internal/ceres/block_jacobi_preconditioner.cc
+++ b/internal/ceres/block_jacobi_preconditioner.cc
@@ -30,9 +30,9 @@
 
 #include "ceres/block_jacobi_preconditioner.h"
 
+#include "ceres/block_random_access_diagonal_matrix.h"
 #include "ceres/block_sparse_matrix.h"
 #include "ceres/block_structure.h"
-#include "ceres/block_random_access_diagonal_matrix.h"
 #include "ceres/casts.h"
 #include "ceres/internal/eigen.h"
 
@@ -65,13 +65,11 @@
       const int col_block_size = bs->cols[block_id].size;
 
       int r, c, row_stride, col_stride;
-      CellInfo* cell_info = m_->GetCell(block_id, block_id,
-                                        &r, &c,
-                                        &row_stride, &col_stride);
+      CellInfo* cell_info =
+          m_->GetCell(block_id, block_id, &r, &c, &row_stride, &col_stride);
       MatrixRef m(cell_info->values, row_stride, col_stride);
-      ConstMatrixRef b(values + cells[j].position,
-                       row_block_size,
-                       col_block_size);
+      ConstMatrixRef b(
+          values + cells[j].position, row_block_size, col_block_size);
       m.block(r, c, col_block_size, col_block_size) += b.transpose() * b;
     }
   }
@@ -82,9 +80,7 @@
     for (int i = 0; i < bs->cols.size(); ++i) {
       const int block_size = bs->cols[i].size;
       int r, c, row_stride, col_stride;
-      CellInfo* cell_info = m_->GetCell(i, i,
-                                        &r, &c,
-                                        &row_stride, &col_stride);
+      CellInfo* cell_info = m_->GetCell(i, i, &r, &c, &row_stride, &col_stride);
       MatrixRef m(cell_info->values, row_stride, col_stride);
       m.block(r, c, block_size, block_size).diagonal() +=
           ConstVectorRef(D + position, block_size).array().square().matrix();
diff --git a/internal/ceres/block_jacobi_preconditioner.h b/internal/ceres/block_jacobi_preconditioner.h
index 856b506..4ede9b9 100644
--- a/internal/ceres/block_jacobi_preconditioner.h
+++ b/internal/ceres/block_jacobi_preconditioner.h
@@ -32,6 +32,7 @@
 #define CERES_INTERNAL_BLOCK_JACOBI_PRECONDITIONER_H_
 
 #include <memory>
+
 #include "ceres/block_random_access_diagonal_matrix.h"
 #include "ceres/preconditioner.h"
 
diff --git a/internal/ceres/block_jacobi_preconditioner_test.cc b/internal/ceres/block_jacobi_preconditioner_test.cc
index 4a9a871..cc582c6 100644
--- a/internal/ceres/block_jacobi_preconditioner_test.cc
+++ b/internal/ceres/block_jacobi_preconditioner_test.cc
@@ -32,16 +32,16 @@
 
 #include <memory>
 #include <vector>
-#include "ceres/block_random_access_diagonal_matrix.h"
-#include "ceres/linear_least_squares_problems.h"
-#include "ceres/block_sparse_matrix.h"
-#include "gtest/gtest.h"
+
 #include "Eigen/Dense"
+#include "ceres/block_random_access_diagonal_matrix.h"
+#include "ceres/block_sparse_matrix.h"
+#include "ceres/linear_least_squares_problems.h"
+#include "gtest/gtest.h"
 
 namespace ceres {
 namespace internal {
 
-
 class BlockJacobiPreconditionerTest : public ::testing::Test {
  protected:
   void SetUpFromProblemId(int problem_id) {
@@ -56,7 +56,10 @@
     A->ToDenseMatrix(&dense_a);
     dense_ata = dense_a.transpose() * dense_a;
     dense_ata += VectorRef(D.get(), A->num_cols())
-        .array().square().matrix().asDiagonal();
+                     .array()
+                     .square()
+                     .matrix()
+                     .asDiagonal();
   }
 
   void VerifyDiagonalBlocks(const int problem_id) {
@@ -73,17 +76,14 @@
     for (int i = 0; i < bs->cols.size(); ++i) {
       const int block_size = bs->cols[i].size;
       int r, c, row_stride, col_stride;
-      CellInfo* cell_info = m->GetCell(i, i,
-                                       &r, &c,
-                                       &row_stride, &col_stride);
+      CellInfo* cell_info = m->GetCell(i, i, &r, &c, &row_stride, &col_stride);
       MatrixRef m(cell_info->values, row_stride, col_stride);
       Matrix actual_block_inverse = m.block(r, c, block_size, block_size);
-      Matrix expected_block = dense_ata.block(bs->cols[i].position,
-                                              bs->cols[i].position,
-                                              block_size,
-                                              block_size);
+      Matrix expected_block = dense_ata.block(
+          bs->cols[i].position, bs->cols[i].position, block_size, block_size);
       const double residual = (actual_block_inverse * expected_block -
-                               Matrix::Identity(block_size, block_size)).norm();
+                               Matrix::Identity(block_size, block_size))
+                                  .norm();
       EXPECT_NEAR(residual, 0.0, 1e-12) << "Block: " << i;
     }
   }
@@ -93,13 +93,9 @@
   Matrix dense_ata;
 };
 
-TEST_F(BlockJacobiPreconditionerTest, SmallProblem) {
-  VerifyDiagonalBlocks(2);
-}
+TEST_F(BlockJacobiPreconditionerTest, SmallProblem) { VerifyDiagonalBlocks(2); }
 
-TEST_F(BlockJacobiPreconditionerTest, LargeProblem) {
-  VerifyDiagonalBlocks(3);
-}
+TEST_F(BlockJacobiPreconditionerTest, LargeProblem) { VerifyDiagonalBlocks(3); }
 
 }  // namespace internal
 }  // namespace ceres
diff --git a/internal/ceres/block_jacobian_writer.cc b/internal/ceres/block_jacobian_writer.cc
index 6998bd6..17c157b 100644
--- a/internal/ceres/block_jacobian_writer.cc
+++ b/internal/ceres/block_jacobian_writer.cc
@@ -32,11 +32,11 @@
 
 #include "ceres/block_evaluate_preparer.h"
 #include "ceres/block_sparse_matrix.h"
+#include "ceres/internal/eigen.h"
+#include "ceres/internal/port.h"
 #include "ceres/parameter_block.h"
 #include "ceres/program.h"
 #include "ceres/residual_block.h"
-#include "ceres/internal/eigen.h"
-#include "ceres/internal/port.h"
 
 namespace ceres {
 namespace internal {
diff --git a/internal/ceres/block_jacobian_writer.h b/internal/ceres/block_jacobian_writer.h
index c94a0d3..8054d7b 100644
--- a/internal/ceres/block_jacobian_writer.h
+++ b/internal/ceres/block_jacobian_writer.h
@@ -39,6 +39,7 @@
 #define CERES_INTERNAL_BLOCK_JACOBIAN_WRITER_H_
 
 #include <vector>
+
 #include "ceres/evaluator.h"
 #include "ceres/internal/port.h"
 
@@ -52,8 +53,7 @@
 // TODO(sameeragarwal): This class needs documemtation.
 class BlockJacobianWriter {
  public:
-  BlockJacobianWriter(const Evaluator::Options& options,
-                      Program* program);
+  BlockJacobianWriter(const Evaluator::Options& options, Program* program);
 
   // JacobianWriter interface.
 
diff --git a/internal/ceres/block_random_access_dense_matrix.cc b/internal/ceres/block_random_access_dense_matrix.cc
index f567aa5..386f81e 100644
--- a/internal/ceres/block_random_access_dense_matrix.cc
+++ b/internal/ceres/block_random_access_dense_matrix.cc
@@ -31,6 +31,7 @@
 #include "ceres/block_random_access_dense_matrix.h"
 
 #include <vector>
+
 #include "ceres/internal/eigen.h"
 #include "glog/logging.h"
 
@@ -59,8 +60,7 @@
 
 // Assume that the user does not hold any locks on any cell blocks
 // when they are calling SetZero.
-BlockRandomAccessDenseMatrix::~BlockRandomAccessDenseMatrix() {
-}
+BlockRandomAccessDenseMatrix::~BlockRandomAccessDenseMatrix() {}
 
 CellInfo* BlockRandomAccessDenseMatrix::GetCell(const int row_block_id,
                                                 const int col_block_id,
diff --git a/internal/ceres/block_random_access_dense_matrix.h b/internal/ceres/block_random_access_dense_matrix.h
index 8c5e252..be67f34 100644
--- a/internal/ceres/block_random_access_dense_matrix.h
+++ b/internal/ceres/block_random_access_dense_matrix.h
@@ -31,11 +31,10 @@
 #ifndef CERES_INTERNAL_BLOCK_RANDOM_ACCESS_DENSE_MATRIX_H_
 #define CERES_INTERNAL_BLOCK_RANDOM_ACCESS_DENSE_MATRIX_H_
 
-#include "ceres/block_random_access_matrix.h"
-
 #include <memory>
 #include <vector>
 
+#include "ceres/block_random_access_matrix.h"
 #include "ceres/internal/port.h"
 
 namespace ceres {
diff --git a/internal/ceres/block_random_access_dense_matrix_test.cc b/internal/ceres/block_random_access_dense_matrix_test.cc
index 8a5ba59..0736d56 100644
--- a/internal/ceres/block_random_access_dense_matrix_test.cc
+++ b/internal/ceres/block_random_access_dense_matrix_test.cc
@@ -28,10 +28,12 @@
 //
 // Author: sameeragarwal@google.com (Sameer Agarwal)
 
-#include <vector>
-#include "gtest/gtest.h"
 #include "ceres/block_random_access_dense_matrix.h"
+
+#include <vector>
+
 #include "ceres/internal/eigen.h"
+#include "gtest/gtest.h"
 
 namespace ceres {
 namespace internal {
@@ -54,8 +56,7 @@
       int col;
       int row_stride;
       int col_stride;
-      CellInfo* cell =
-          m.GetCell(i, j, &row, &col, &row_stride, &col_stride);
+      CellInfo* cell = m.GetCell(i, j, &row, &col, &row_stride, &col_stride);
 
       EXPECT_TRUE(cell != NULL);
       EXPECT_EQ(row, row_idx);
@@ -84,11 +85,10 @@
       int col;
       int row_stride;
       int col_stride;
-      CellInfo* cell = m.GetCell(
-          i, j, &row, &col, &row_stride, &col_stride);
-      MatrixRef(cell->values, row_stride, col_stride).block(
-          row, col, blocks[i], blocks[j]) =
-          (i+1) * (j+1) * Matrix::Ones(blocks[i], blocks[j]);
+      CellInfo* cell = m.GetCell(i, j, &row, &col, &row_stride, &col_stride);
+      MatrixRef(cell->values, row_stride, col_stride)
+          .block(row, col, blocks[i], blocks[j]) =
+          (i + 1) * (j + 1) * Matrix::Ones(blocks[i], blocks[j]);
     }
   }
 
diff --git a/internal/ceres/block_random_access_diagonal_matrix.cc b/internal/ceres/block_random_access_diagonal_matrix.cc
index 526d173..08f6d7f 100644
--- a/internal/ceres/block_random_access_diagonal_matrix.cc
+++ b/internal/ceres/block_random_access_diagonal_matrix.cc
@@ -63,9 +63,8 @@
     num_nonzeros += blocks_[i] * blocks_[i];
   }
 
-  VLOG(1) << "Matrix Size [" << num_cols
-          << "," << num_cols
-          << "] " << num_nonzeros;
+  VLOG(1) << "Matrix Size [" << num_cols << "," << num_cols << "] "
+          << num_nonzeros;
 
   tsm_.reset(new TripletSparseMatrix(num_cols, num_cols, num_nonzeros));
   tsm_->set_num_nonzeros(num_nonzeros);
@@ -116,8 +115,7 @@
 // when they are calling SetZero.
 void BlockRandomAccessDiagonalMatrix::SetZero() {
   if (tsm_->num_nonzeros()) {
-    VectorRef(tsm_->mutable_values(),
-              tsm_->num_nonzeros()).setZero();
+    VectorRef(tsm_->mutable_values(), tsm_->num_nonzeros()).setZero();
   }
 }
 
@@ -126,11 +124,8 @@
   for (int i = 0; i < blocks_.size(); ++i) {
     const int block_size = blocks_[i];
     MatrixRef block(values, block_size, block_size);
-    block =
-        block
-        .selfadjointView<Eigen::Upper>()
-        .llt()
-        .solve(Matrix::Identity(block_size, block_size));
+    block = block.selfadjointView<Eigen::Upper>().llt().solve(
+        Matrix::Identity(block_size, block_size));
     values += block_size * block_size;
   }
 }
diff --git a/internal/ceres/block_random_access_diagonal_matrix.h b/internal/ceres/block_random_access_diagonal_matrix.h
index 3bda7d1..4d3e1cc 100644
--- a/internal/ceres/block_random_access_diagonal_matrix.h
+++ b/internal/ceres/block_random_access_diagonal_matrix.h
@@ -50,7 +50,8 @@
  public:
   // blocks is an array of block sizes.
   explicit BlockRandomAccessDiagonalMatrix(const std::vector<int>& blocks);
-  BlockRandomAccessDiagonalMatrix(const BlockRandomAccessDiagonalMatrix&) = delete;
+  BlockRandomAccessDiagonalMatrix(const BlockRandomAccessDiagonalMatrix&) =
+      delete;
   void operator=(const BlockRandomAccessDiagonalMatrix&) = delete;
 
   // The destructor is not thread safe. It assumes that no one is
diff --git a/internal/ceres/block_random_access_diagonal_matrix_test.cc b/internal/ceres/block_random_access_diagonal_matrix_test.cc
index a54595c..e384dac 100644
--- a/internal/ceres/block_random_access_diagonal_matrix_test.cc
+++ b/internal/ceres/block_random_access_diagonal_matrix_test.cc
@@ -28,15 +28,16 @@
 //
 // Author: sameeragarwal@google.com (Sameer Agarwal)
 
+#include "ceres/block_random_access_diagonal_matrix.h"
+
 #include <limits>
 #include <memory>
 #include <vector>
 
-#include "ceres/block_random_access_diagonal_matrix.h"
+#include "Eigen/Cholesky"
 #include "ceres/internal/eigen.h"
 #include "glog/logging.h"
 #include "gtest/gtest.h"
-#include "Eigen/Cholesky"
 
 namespace ceres {
 namespace internal {
@@ -49,7 +50,7 @@
     blocks.push_back(4);
     blocks.push_back(5);
     const int num_rows = 3 + 4 + 5;
-    num_nonzeros_ =  3 * 3 + 4 * 4 + 5 * 5;
+    num_nonzeros_ = 3 * 3 + 4 * 4 + 5 * 5;
 
     m_.reset(new BlockRandomAccessDiagonalMatrix(blocks));
 
@@ -66,9 +67,8 @@
 
       for (int j = 0; j < blocks.size(); ++j) {
         col_block_id = j;
-        CellInfo* cell =  m_->GetCell(row_block_id, col_block_id,
-                                    &row, &col,
-                                    &row_stride, &col_stride);
+        CellInfo* cell = m_->GetCell(
+            row_block_id, col_block_id, &row, &col, &row_stride, &col_stride);
         // Off diagonal entries are not present.
         if (i != j) {
           EXPECT_TRUE(cell == NULL);
@@ -82,11 +82,11 @@
         EXPECT_EQ(col_stride, blocks[col_block_id]);
 
         // Write into the block
-        MatrixRef(cell->values, row_stride, col_stride).block(
-            row, col, blocks[row_block_id], blocks[col_block_id]) =
-            (row_block_id + 1) * (col_block_id +1) *
-            Matrix::Ones(blocks[row_block_id], blocks[col_block_id])
-            + Matrix::Identity(blocks[row_block_id], blocks[row_block_id]);
+        MatrixRef(cell->values, row_stride, col_stride)
+            .block(row, col, blocks[row_block_id], blocks[col_block_id]) =
+            (row_block_id + 1) * (col_block_id + 1) *
+                Matrix::Ones(blocks[row_block_id], blocks[col_block_id]) +
+            Matrix::Identity(blocks[row_block_id], blocks[row_block_id]);
       }
     }
   }
@@ -107,28 +107,31 @@
   double kTolerance = 1e-14;
 
   // (0,0)
-  EXPECT_NEAR((dense.block(0, 0, 3, 3) -
-               (Matrix::Ones(3, 3) + Matrix::Identity(3, 3))).norm(),
-              0.0,
-              kTolerance);
+  EXPECT_NEAR(
+      (dense.block(0, 0, 3, 3) - (Matrix::Ones(3, 3) + Matrix::Identity(3, 3)))
+          .norm(),
+      0.0,
+      kTolerance);
 
   // (1,1)
   EXPECT_NEAR((dense.block(3, 3, 4, 4) -
-               (2 * 2 * Matrix::Ones(4, 4) + Matrix::Identity(4, 4))).norm(),
+               (2 * 2 * Matrix::Ones(4, 4) + Matrix::Identity(4, 4)))
+                  .norm(),
               0.0,
               kTolerance);
 
   // (1,1)
   EXPECT_NEAR((dense.block(7, 7, 5, 5) -
-               (3 * 3 * Matrix::Ones(5, 5) + Matrix::Identity(5, 5))).norm(),
+               (3 * 3 * Matrix::Ones(5, 5) + Matrix::Identity(5, 5)))
+                  .norm(),
               0.0,
               kTolerance);
 
   // There is nothing else in the matrix besides these four blocks.
-  EXPECT_NEAR(dense.norm(),
-              sqrt(6 * 1.0 + 3 * 4.0 +
-                   12 * 16.0 + 4 * 25.0 +
-                   20 * 81.0 + 5 * 100.0), kTolerance);
+  EXPECT_NEAR(
+      dense.norm(),
+      sqrt(6 * 1.0 + 3 * 4.0 + 12 * 16.0 + 4 * 25.0 + 20 * 81.0 + 5 * 100.0),
+      kTolerance);
 }
 
 TEST_F(BlockRandomAccessDiagonalMatrixTest, RightMultiply) {
@@ -139,7 +142,7 @@
   Vector x = Vector::Random(dense.rows());
   Vector expected_y = dense * x;
   Vector actual_y = Vector::Zero(dense.rows());
-  m_->RightMultiply(x.data(),  actual_y.data());
+  m_->RightMultiply(x.data(), actual_y.data());
   EXPECT_NEAR((expected_y - actual_y).norm(), 0, kTolerance);
 }
 
diff --git a/internal/ceres/block_random_access_matrix.cc b/internal/ceres/block_random_access_matrix.cc
index 347d765..ea88855 100644
--- a/internal/ceres/block_random_access_matrix.cc
+++ b/internal/ceres/block_random_access_matrix.cc
@@ -33,8 +33,7 @@
 namespace ceres {
 namespace internal {
 
-BlockRandomAccessMatrix::~BlockRandomAccessMatrix() {
-}
+BlockRandomAccessMatrix::~BlockRandomAccessMatrix() {}
 
 }  // namespace internal
 }  // namespace ceres
diff --git a/internal/ceres/block_random_access_sparse_matrix.cc b/internal/ceres/block_random_access_sparse_matrix.cc
index 9c16454..c28b7ce 100644
--- a/internal/ceres/block_random_access_sparse_matrix.cc
+++ b/internal/ceres/block_random_access_sparse_matrix.cc
@@ -50,10 +50,8 @@
 using std::vector;
 
 BlockRandomAccessSparseMatrix::BlockRandomAccessSparseMatrix(
-    const vector<int>& blocks,
-    const set<pair<int, int>>& block_pairs)
-    : kMaxRowBlocks(10 * 1000 * 1000),
-      blocks_(blocks) {
+    const vector<int>& blocks, const set<pair<int, int>>& block_pairs)
+    : kMaxRowBlocks(10 * 1000 * 1000), blocks_(blocks) {
   CHECK_LT(blocks.size(), kMaxRowBlocks);
 
   // Build the row/column layout vector and count the number of scalar
@@ -75,9 +73,8 @@
     num_nonzeros += row_block_size * col_block_size;
   }
 
-  VLOG(1) << "Matrix Size [" << num_cols
-          << "," << num_cols
-          << "] " << num_nonzeros;
+  VLOG(1) << "Matrix Size [" << num_cols << "," << num_cols << "] "
+          << num_nonzeros;
 
   tsm_.reset(new TripletSparseMatrix(num_cols, num_cols, num_nonzeros));
   tsm_->set_num_nonzeros(num_nonzeros);
@@ -105,11 +102,11 @@
         layout_[IntPairToLong(row_block_id, col_block_id)]->values - values;
     for (int r = 0; r < row_block_size; ++r) {
       for (int c = 0; c < col_block_size; ++c, ++pos) {
-          rows[pos] = block_positions_[row_block_id] + r;
-          cols[pos] = block_positions_[col_block_id] + c;
-          values[pos] = 1.0;
-          DCHECK_LT(rows[pos], tsm_->num_rows());
-          DCHECK_LT(cols[pos], tsm_->num_rows());
+        rows[pos] = block_positions_[row_block_id] + r;
+        cols[pos] = block_positions_[col_block_id] + c;
+        values[pos] = 1.0;
+        DCHECK_LT(rows[pos], tsm_->num_rows());
+        DCHECK_LT(cols[pos], tsm_->num_rows());
       }
     }
   }
@@ -129,7 +126,7 @@
                                                  int* col,
                                                  int* row_stride,
                                                  int* col_stride) {
-  const LayoutType::iterator it  =
+  const LayoutType::iterator it =
       layout_.find(IntPairToLong(row_block_id, col_block_id));
   if (it == layout_.end()) {
     return NULL;
@@ -147,8 +144,7 @@
 // when they are calling SetZero.
 void BlockRandomAccessSparseMatrix::SetZero() {
   if (tsm_->num_nonzeros()) {
-    VectorRef(tsm_->mutable_values(),
-              tsm_->num_nonzeros()).setZero();
+    VectorRef(tsm_->mutable_values(), tsm_->num_nonzeros()).setZero();
   }
 }
 
@@ -164,7 +160,9 @@
     const int col_block_pos = block_positions_[col];
 
     MatrixVectorMultiply<Eigen::Dynamic, Eigen::Dynamic, 1>(
-        cell_position_and_data.second, row_block_size, col_block_size,
+        cell_position_and_data.second,
+        row_block_size,
+        col_block_size,
         x + col_block_pos,
         y + row_block_pos);
 
@@ -174,7 +172,9 @@
     // triangular multiply also.
     if (row != col) {
       MatrixTransposeVectorMultiply<Eigen::Dynamic, Eigen::Dynamic, 1>(
-          cell_position_and_data.second, row_block_size, col_block_size,
+          cell_position_and_data.second,
+          row_block_size,
+          col_block_size,
           x + row_block_pos,
           y + col_block_pos);
     }
diff --git a/internal/ceres/block_random_access_sparse_matrix.h b/internal/ceres/block_random_access_sparse_matrix.h
index d542a3d..ca04163 100644
--- a/internal/ceres/block_random_access_sparse_matrix.h
+++ b/internal/ceres/block_random_access_sparse_matrix.h
@@ -39,10 +39,10 @@
 #include <vector>
 
 #include "ceres/block_random_access_matrix.h"
-#include "ceres/triplet_sparse_matrix.h"
 #include "ceres/internal/port.h"
-#include "ceres/types.h"
 #include "ceres/small_blas.h"
+#include "ceres/triplet_sparse_matrix.h"
+#include "ceres/types.h"
 
 namespace ceres {
 namespace internal {
@@ -110,7 +110,7 @@
 
   // A mapping from <row_block_id, col_block_id> to the position in
   // the values array of tsm_ where the block is stored.
-  typedef std::unordered_map<long int, CellInfo* > LayoutType;
+  typedef std::unordered_map<long int, CellInfo*> LayoutType;
   LayoutType layout_;
 
   // In order traversal of contents of the matrix. This allows us to
diff --git a/internal/ceres/block_random_access_sparse_matrix_test.cc b/internal/ceres/block_random_access_sparse_matrix_test.cc
index fa0698f..557b678 100644
--- a/internal/ceres/block_random_access_sparse_matrix_test.cc
+++ b/internal/ceres/block_random_access_sparse_matrix_test.cc
@@ -28,11 +28,12 @@
 //
 // Author: sameeragarwal@google.com (Sameer Agarwal)
 
+#include "ceres/block_random_access_sparse_matrix.h"
+
 #include <limits>
 #include <memory>
 #include <vector>
 
-#include "ceres/block_random_access_sparse_matrix.h"
 #include "ceres/internal/eigen.h"
 #include "glog/logging.h"
 #include "gtest/gtest.h"
@@ -77,9 +78,8 @@
     int col;
     int row_stride;
     int col_stride;
-    CellInfo* cell =  m.GetCell(row_block_id, col_block_id,
-                                &row, &col,
-                                &row_stride, &col_stride);
+    CellInfo* cell = m.GetCell(
+        row_block_id, col_block_id, &row, &col, &row_stride, &col_stride);
     EXPECT_TRUE(cell != NULL);
     EXPECT_EQ(row, 0);
     EXPECT_EQ(col, 0);
@@ -87,9 +87,9 @@
     EXPECT_EQ(col_stride, blocks[col_block_id]);
 
     // Write into the block
-    MatrixRef(cell->values, row_stride, col_stride).block(
-        row, col, blocks[row_block_id], blocks[col_block_id]) =
-        (row_block_id + 1) * (col_block_id +1) *
+    MatrixRef(cell->values, row_stride, col_stride)
+        .block(row, col, blocks[row_block_id], blocks[col_block_id]) =
+        (row_block_id + 1) * (col_block_id + 1) *
         Matrix::Ones(blocks[row_block_id], blocks[col_block_id]);
   }
 
@@ -103,9 +103,8 @@
   double kTolerance = 1e-14;
 
   // (0, 0)
-  EXPECT_NEAR((dense.block(0, 0, 3, 3) - Matrix::Ones(3, 3)).norm(),
-              0.0,
-              kTolerance);
+  EXPECT_NEAR(
+      (dense.block(0, 0, 3, 3) - Matrix::Ones(3, 3)).norm(), 0.0, kTolerance);
   // (1, 1)
   EXPECT_NEAR((dense.block(3, 3, 4, 4) - 2 * 2 * Matrix::Ones(4, 4)).norm(),
               0.0,
@@ -120,8 +119,8 @@
               kTolerance);
 
   // There is nothing else in the matrix besides these four blocks.
-  EXPECT_NEAR(dense.norm(), sqrt(9. + 16. * 16. + 36. * 20. + 9. * 15.),
-              kTolerance);
+  EXPECT_NEAR(
+      dense.norm(), sqrt(9. + 16. * 16. + 36. * 20. + 9. * 15.), kTolerance);
 
   Vector x = Vector::Ones(dense.rows());
   Vector actual_y = Vector::Zero(dense.rows());
@@ -131,8 +130,7 @@
   m.SymmetricRightMultiply(x.data(), actual_y.data());
   EXPECT_NEAR((expected_y - actual_y).norm(), 0.0, kTolerance)
       << "actual: " << actual_y.transpose() << "\n"
-      << "expected: " << expected_y.transpose()
-      << "matrix: \n " << dense;
+      << "expected: " << expected_y.transpose() << "matrix: \n " << dense;
 }
 
 // IntPairToLong is private, thus this fixture is needed to access and
@@ -155,14 +153,13 @@
   }
 
   void CheckLongToIntPair() {
-    uint64_t max_rows =  m_->kMaxRowBlocks;
+    uint64_t max_rows = m_->kMaxRowBlocks;
     for (int row = max_rows - 10; row < max_rows; ++row) {
       for (int col = 0; col < 10; ++col) {
         int row_computed;
         int col_computed;
-        m_->LongToIntPair(m_->IntPairToLong(row, col),
-                          &row_computed,
-                          &col_computed);
+        m_->LongToIntPair(
+            m_->IntPairToLong(row, col), &row_computed, &col_computed);
         EXPECT_EQ(row, row_computed);
         EXPECT_EQ(col, col_computed);
       }
diff --git a/internal/ceres/block_sparse_matrix.cc b/internal/ceres/block_sparse_matrix.cc
index 8f50f35..5efd2e1 100644
--- a/internal/ceres/block_sparse_matrix.cc
+++ b/internal/ceres/block_sparse_matrix.cc
@@ -30,9 +30,10 @@
 
 #include "ceres/block_sparse_matrix.h"
 
-#include <cstddef>
 #include <algorithm>
+#include <cstddef>
 #include <vector>
+
 #include "ceres/block_structure.h"
 #include "ceres/internal/eigen.h"
 #include "ceres/random.h"
@@ -77,8 +78,8 @@
   CHECK_GE(num_rows_, 0);
   CHECK_GE(num_cols_, 0);
   CHECK_GE(num_nonzeros_, 0);
-  VLOG(2) << "Allocating values array with "
-          << num_nonzeros_ * sizeof(double) << " bytes.";  // NOLINT
+  VLOG(2) << "Allocating values array with " << num_nonzeros_ * sizeof(double)
+          << " bytes.";  // NOLINT
   values_.reset(new double[num_nonzeros_]);
   max_num_nonzeros_ = num_nonzeros_;
   CHECK(values_ != nullptr);
@@ -88,7 +89,7 @@
   std::fill(values_.get(), values_.get() + num_nonzeros_, 0.0);
 }
 
-void BlockSparseMatrix::RightMultiply(const double* x,  double* y) const {
+void BlockSparseMatrix::RightMultiply(const double* x, double* y) const {
   CHECK(x != nullptr);
   CHECK(y != nullptr);
 
@@ -101,7 +102,9 @@
       int col_block_size = block_structure_->cols[col_block_id].size;
       int col_block_pos = block_structure_->cols[col_block_id].position;
       MatrixVectorMultiply<Eigen::Dynamic, Eigen::Dynamic, 1>(
-          values_.get() + cells[j].position, row_block_size, col_block_size,
+          values_.get() + cells[j].position,
+          row_block_size,
+          col_block_size,
           x + col_block_pos,
           y + row_block_pos);
     }
@@ -121,7 +124,9 @@
       int col_block_size = block_structure_->cols[col_block_id].size;
       int col_block_pos = block_structure_->cols[col_block_id].position;
       MatrixTransposeVectorMultiply<Eigen::Dynamic, Eigen::Dynamic, 1>(
-          values_.get() + cells[j].position, row_block_size, col_block_size,
+          values_.get() + cells[j].position,
+          row_block_size,
+          col_block_size,
           x + row_block_pos,
           y + col_block_pos);
     }
@@ -138,8 +143,8 @@
       int col_block_id = cells[j].block_id;
       int col_block_size = block_structure_->cols[col_block_id].size;
       int col_block_pos = block_structure_->cols[col_block_id].position;
-      const MatrixRef m(values_.get() + cells[j].position,
-                        row_block_size, col_block_size);
+      const MatrixRef m(
+          values_.get() + cells[j].position, row_block_size, col_block_size);
       VectorRef(x + col_block_pos, col_block_size) += m.colwise().squaredNorm();
     }
   }
@@ -155,8 +160,8 @@
       int col_block_id = cells[j].block_id;
       int col_block_size = block_structure_->cols[col_block_id].size;
       int col_block_pos = block_structure_->cols[col_block_id].position;
-      MatrixRef m(values_.get() + cells[j].position,
-                        row_block_size, col_block_size);
+      MatrixRef m(
+          values_.get() + cells[j].position, row_block_size, col_block_size);
       m *= ConstVectorRef(scale + col_block_pos, col_block_size).asDiagonal();
     }
   }
@@ -178,8 +183,8 @@
       int col_block_size = block_structure_->cols[col_block_id].size;
       int col_block_pos = block_structure_->cols[col_block_id].position;
       int jac_pos = cells[j].position;
-      m.block(row_block_pos, col_block_pos, row_block_size, col_block_size)
-          += MatrixRef(values_.get() + jac_pos, row_block_size, col_block_size);
+      m.block(row_block_pos, col_block_pos, row_block_size, col_block_size) +=
+          MatrixRef(values_.get() + jac_pos, row_block_size, col_block_size);
     }
   }
 }
@@ -201,7 +206,7 @@
       int col_block_size = block_structure_->cols[col_block_id].size;
       int col_block_pos = block_structure_->cols[col_block_id].position;
       int jac_pos = cells[j].position;
-       for (int r = 0; r < row_block_size; ++r) {
+      for (int r = 0; r < row_block_size; ++r) {
         for (int c = 0; c < col_block_size; ++c, ++jac_pos) {
           matrix->mutable_rows()[jac_pos] = row_block_pos + r;
           matrix->mutable_cols()[jac_pos] = col_block_pos + c;
@@ -215,8 +220,7 @@
 
 // Return a pointer to the block structure. We continue to hold
 // ownership of the object though.
-const CompressedRowBlockStructure* BlockSparseMatrix::block_structure()
-    const {
+const CompressedRowBlockStructure* BlockSparseMatrix::block_structure() const {
   return block_structure_.get();
 }
 
@@ -233,7 +237,8 @@
       int jac_pos = cells[j].position;
       for (int r = 0; r < row_block_size; ++r) {
         for (int c = 0; c < col_block_size; ++c) {
-          fprintf(file, "% 10d % 10d %17f\n",
+          fprintf(file,
+                  "% 10d % 10d %17f\n",
                   row_block_pos + r,
                   col_block_pos + c,
                   values_[jac_pos++]);
@@ -369,7 +374,6 @@
     int row_block_position = 0;
     int value_position = 0;
     for (int r = 0; r < options.num_row_blocks; ++r) {
-
       const int delta_block_size =
           Uniform(options.max_row_block_size - options.min_row_block_size);
       const int row_block_size = options.min_row_block_size + delta_block_size;
diff --git a/internal/ceres/block_sparse_matrix.h b/internal/ceres/block_sparse_matrix.h
index d0c255d..fa9e4ed 100644
--- a/internal/ceres/block_sparse_matrix.h
+++ b/internal/ceres/block_sparse_matrix.h
@@ -35,9 +35,10 @@
 #define CERES_INTERNAL_BLOCK_SPARSE_MATRIX_H_
 
 #include <memory>
+
 #include "ceres/block_structure.h"
-#include "ceres/sparse_matrix.h"
 #include "ceres/internal/eigen.h"
+#include "ceres/sparse_matrix.h"
 
 namespace ceres {
 namespace internal {
@@ -77,11 +78,13 @@
   void ToDenseMatrix(Matrix* dense_matrix) const final;
   void ToTextFile(FILE* file) const final;
 
+  // clang-format off
   int num_rows()         const final { return num_rows_;     }
   int num_cols()         const final { return num_cols_;     }
   int num_nonzeros()     const final { return num_nonzeros_; }
   const double* values() const final { return values_.get(); }
   double* mutable_values()     final { return values_.get(); }
+  // clang-format on
 
   void ToTripletSparseMatrix(TripletSparseMatrix* matrix) const;
   const CompressedRowBlockStructure* block_structure() const;
@@ -94,8 +97,7 @@
   void DeleteRowBlocks(int delta_row_blocks);
 
   static BlockSparseMatrix* CreateDiagonalMatrix(
-      const double* diagonal,
-      const std::vector<Block>& column_blocks);
+      const double* diagonal, const std::vector<Block>& column_blocks);
 
   struct RandomMatrixOptions {
     int num_row_blocks = 0;
diff --git a/internal/ceres/block_sparse_matrix_test.cc b/internal/ceres/block_sparse_matrix_test.cc
index 0c88bd9..02d3fb1 100644
--- a/internal/ceres/block_sparse_matrix_test.cc
+++ b/internal/ceres/block_sparse_matrix_test.cc
@@ -32,6 +32,7 @@
 
 #include <memory>
 #include <string>
+
 #include "ceres/casts.h"
 #include "ceres/internal/eigen.h"
 #include "ceres/linear_least_squares_problems.h"
@@ -43,7 +44,7 @@
 namespace internal {
 
 class BlockSparseMatrixTest : public ::testing::Test {
- protected :
+ protected:
   void SetUp() final {
     std::unique_ptr<LinearLeastSquaresProblem> problem(
         CreateLinearLeastSquaresProblemFromId(2));
@@ -159,13 +160,13 @@
 
     A_->RightMultiply(x.data(), y_a.data());
     B_->RightMultiply(x.data(), y_b.data());
-    EXPECT_LT((y_a.head(B_->num_rows()) - y_b.head(B_->num_rows())).norm(), 1e-12);
+    EXPECT_LT((y_a.head(B_->num_rows()) - y_b.head(B_->num_rows())).norm(),
+              1e-12);
     Vector expected_tail = Vector::Zero(A_->num_cols());
     expected_tail(i) = diagonal(i);
     EXPECT_LT((y_a.tail(A_->num_cols()) - expected_tail).norm(), 1e-12);
   }
 
-
   A_->DeleteRowBlocks(column_blocks.size());
   EXPECT_EQ(A_->num_rows(), B_->num_rows());
   EXPECT_EQ(A_->num_cols(), B_->num_cols());
@@ -213,6 +214,5 @@
   }
 }
 
-
 }  // namespace internal
 }  // namespace ceres
diff --git a/internal/ceres/block_structure.cc b/internal/ceres/block_structure.cc
index 6479b60..39ba082 100644
--- a/internal/ceres/block_structure.cc
+++ b/internal/ceres/block_structure.cc
@@ -35,7 +35,7 @@
 
 bool CellLessThan(const Cell& lhs, const Cell& rhs) {
   if (lhs.block_id == rhs.block_id) {
-    return (lhs.position  < rhs.position);
+    return (lhs.position < rhs.position);
   }
   return (lhs.block_id < rhs.block_id);
 }
diff --git a/internal/ceres/block_structure.h b/internal/ceres/block_structure.h
index b5218c0..d49d7d3 100644
--- a/internal/ceres/block_structure.h
+++ b/internal/ceres/block_structure.h
@@ -40,6 +40,7 @@
 
 #include <cstdint>
 #include <vector>
+
 #include "ceres/internal/port.h"
 
 namespace ceres {
diff --git a/internal/ceres/bundle_adjustment_test_util.h b/internal/ceres/bundle_adjustment_test_util.h
index 7e076eb..074931f 100644
--- a/internal/ceres/bundle_adjustment_test_util.h
+++ b/internal/ceres/bundle_adjustment_test_util.h
@@ -37,9 +37,8 @@
 #include <cstdlib>
 #include <string>
 
-#include "ceres/internal/port.h"
-
 #include "ceres/autodiff_cost_function.h"
+#include "ceres/internal/port.h"
 #include "ceres/ordered_groups.h"
 #include "ceres/problem.h"
 #include "ceres/rotation.h"
@@ -73,29 +72,31 @@
   }
 
   ~BundleAdjustmentProblem() {
-    delete []point_index_;
-    delete []camera_index_;
-    delete []observations_;
-    delete []parameters_;
+    delete[] point_index_;
+    delete[] camera_index_;
+    delete[] observations_;
+    delete[] parameters_;
   }
 
   Problem* mutable_problem() { return &problem_; }
   Solver::Options* mutable_solver_options() { return &options_; }
 
-  int num_cameras()            const { return num_cameras_;        }
-  int num_points()             const { return num_points_;         }
-  int num_observations()       const { return num_observations_;   }
-  const int* point_index()     const { return point_index_;  }
+  // clang-format off
+  int num_cameras()            const { return num_cameras_; }
+  int num_points()             const { return num_points_; }
+  int num_observations()       const { return num_observations_; }
+  const int* point_index()     const { return point_index_; }
   const int* camera_index()    const { return camera_index_; }
   const double* observations() const { return observations_; }
-  double* mutable_cameras() { return parameters_; }
-  double* mutable_points() { return parameters_  + 9 * num_cameras_; }
+  double* mutable_cameras()          { return parameters_; }
+  double* mutable_points()           { return parameters_ + 9 * num_cameras_; }
+  // clang-format on
 
   static double kResidualTolerance;
 
  private:
   void ReadData(const string& filename) {
-    FILE * fptr = fopen(filename.c_str(), "r");
+    FILE* fptr = fopen(filename.c_str(), "r");
 
     if (!fptr) {
       LOG(FATAL) << "File Error: unable to open file " << filename;
@@ -106,9 +107,8 @@
     FscanfOrDie(fptr, "%d", &num_points_);
     FscanfOrDie(fptr, "%d", &num_observations_);
 
-    VLOG(1) << "Header: " << num_cameras_
-            << " " << num_points_
-            << " " << num_observations_;
+    VLOG(1) << "Header: " << num_cameras_ << " " << num_points_ << " "
+            << num_observations_;
 
     point_index_ = new int[num_observations_];
     camera_index_ = new int[num_observations_];
@@ -121,7 +121,7 @@
       FscanfOrDie(fptr, "%d", camera_index_ + i);
       FscanfOrDie(fptr, "%d", point_index_ + i);
       for (int j = 0; j < 2; ++j) {
-        FscanfOrDie(fptr, "%lf", observations_ + 2*i + j);
+        FscanfOrDie(fptr, "%lf", observations_ + 2 * i + j);
       }
     }
 
@@ -141,8 +141,8 @@
       // outputs a 2 dimensional residual.
       CostFunction* cost_function =
           new AutoDiffCostFunction<BundlerResidual, 2, 9, 3>(
-              new BundlerResidual(observations_[2*i + 0],
-                                  observations_[2*i + 1]));
+              new BundlerResidual(observations_[2 * i + 0],
+                                  observations_[2 * i + 1]));
 
       // Each observation corresponds to a pair of a camera and a point
       // which are identified by camera_index()[i] and
@@ -170,8 +170,8 @@
     options_.parameter_tolerance = 1e-10;
   }
 
-  template<typename T>
-  void FscanfOrDie(FILE *fptr, const char *format, T *value) {
+  template <typename T>
+  void FscanfOrDie(FILE* fptr, const char* format, T* value) {
     int num_scanned = fscanf(fptr, format, value);
     if (num_scanned != 1) {
       LOG(FATAL) << "Invalid UW data file.";
@@ -186,7 +186,7 @@
   struct BundlerResidual {
     // (u, v): the position of the observation with respect to the image
     // center point.
-    BundlerResidual(double u, double v): u(u), v(v) {}
+    BundlerResidual(double u, double v) : u(u), v(v) {}
 
     template <typename T>
     bool operator()(const T* const camera,
@@ -207,12 +207,12 @@
       // Compute the center of distortion.  The sign change comes from
       // the camera model that Noah Snavely's Bundler assumes, whereby
       // the camera coordinate system has a negative z axis.
-      T xp = - focal * p[0] / p[2];
-      T yp = - focal * p[1] / p[2];
+      T xp = -focal * p[0] / p[2];
+      T yp = -focal * p[1] / p[2];
 
       // Apply second and fourth order radial distortion.
-      T r2 = xp*xp + yp*yp;
-      T distortion = T(1.0) + r2  * (l1 + l2  * r2);
+      T r2 = xp * xp + yp * yp;
+      T distortion = T(1.0) + r2 * (l1 + l2 * r2);
 
       residuals[0] = distortion * xp - u;
       residuals[1] = distortion * yp - v;
diff --git a/internal/ceres/c_api.cc b/internal/ceres/c_api.cc
index 2244909..251cde4 100644
--- a/internal/ceres/c_api.cc
+++ b/internal/ceres/c_api.cc
@@ -34,9 +34,10 @@
 
 #include "ceres/c_api.h"
 
-#include <vector>
 #include <iostream>
 #include <string>
+#include <vector>
+
 #include "ceres/cost_function.h"
 #include "ceres/loss_function.h"
 #include "ceres/problem.h"
@@ -70,8 +71,7 @@
                        int num_residuals,
                        int num_parameter_blocks,
                        int* parameter_block_sizes)
-      : cost_function_(cost_function),
-        user_data_(user_data) {
+      : cost_function_(cost_function), user_data_(user_data) {
     set_num_residuals(num_residuals);
     for (int i = 0; i < num_parameter_blocks; ++i) {
       mutable_parameter_block_sizes()->push_back(parameter_block_sizes[i]);
@@ -81,12 +81,10 @@
   virtual ~CallbackCostFunction() {}
 
   bool Evaluate(double const* const* parameters,
-                        double* residuals,
-                        double** jacobians) const final {
-    return (*cost_function_)(user_data_,
-                             const_cast<double**>(parameters),
-                             residuals,
-                             jacobians);
+                double* residuals,
+                double** jacobians) const final {
+    return (*cost_function_)(
+        user_data_, const_cast<double**>(parameters), residuals, jacobians);
   }
 
  private:
@@ -100,7 +98,7 @@
  public:
   explicit CallbackLossFunction(ceres_loss_function_t loss_function,
                                 void* user_data)
-    : loss_function_(loss_function), user_data_(user_data) {}
+      : loss_function_(loss_function), user_data_(user_data) {}
   void Evaluate(double sq_norm, double* rho) const final {
     (*loss_function_)(user_data_, sq_norm, rho);
   }
@@ -134,8 +132,8 @@
 void ceres_stock_loss_function(void* user_data,
                                double squared_norm,
                                double out[3]) {
-  reinterpret_cast<ceres::LossFunction*>(user_data)
-      ->Evaluate(squared_norm, out);
+  reinterpret_cast<ceres::LossFunction*>(user_data)->Evaluate(squared_norm,
+                                                              out);
 }
 
 ceres_residual_block_id_t* ceres_problem_add_residual_block(
@@ -159,16 +157,15 @@
 
   ceres::LossFunction* callback_loss_function = NULL;
   if (loss_function != NULL) {
-    callback_loss_function = new CallbackLossFunction(loss_function,
-                                                      loss_function_data);
+    callback_loss_function =
+        new CallbackLossFunction(loss_function, loss_function_data);
   }
 
   std::vector<double*> parameter_blocks(parameters,
                                         parameters + num_parameter_blocks);
   return reinterpret_cast<ceres_residual_block_id_t*>(
-      ceres_problem->AddResidualBlock(callback_cost_function,
-                                      callback_loss_function,
-                                      parameter_blocks));
+      ceres_problem->AddResidualBlock(
+          callback_cost_function, callback_loss_function, parameter_blocks));
 }
 
 void ceres_solve(ceres_problem_t* c_problem) {
diff --git a/internal/ceres/c_api_test.cc b/internal/ceres/c_api_test.cc
index fa11249..043f6ab 100644
--- a/internal/ceres/c_api_test.cc
+++ b/internal/ceres/c_api_test.cc
@@ -37,6 +37,7 @@
 
 // Duplicated from curve_fitting.cc.
 int num_observations = 67;
+// clang-format off
 double data[] = {
   0.000000e+00, 1.133898e+00,
   7.500000e-02, 1.334902e+00,
@@ -106,13 +107,14 @@
   4.875000e+00, 4.727863e+00,
   4.950000e+00, 4.669206e+00,
 };
+// clang-format on
 
 // A test cost function, similar to the one in curve_fitting.c.
 static int exponential_residual(void* user_data,
                                 double** parameters,
                                 double* residuals,
                                 double** jacobians) {
-  double* measurement = (double*) user_data;
+  double* measurement = (double*)user_data;
   double x = measurement[0];
   double y = measurement[1];
   double m = parameters[0][0];
@@ -123,10 +125,10 @@
     return 1;
   }
   if (jacobians[0] != NULL) {
-    jacobians[0][0] = - x * exp(m * x + c);  // dr/dm
+    jacobians[0][0] = -x * exp(m * x + c);  // dr/dm
   }
   if (jacobians[1] != NULL) {
-    jacobians[1][0] =     - exp(m * x + c);  // dr/dc
+    jacobians[1][0] = -exp(m * x + c);  // dr/dc
   }
   return 1;
 }
@@ -137,8 +139,8 @@
 TEST(C_API, SimpleEndToEndTest) {
   double m = 0.0;
   double c = 0.0;
-  double *parameter_pointers[] = { &m, &c };
-  int parameter_sizes[] = { 1, 1 };
+  double* parameter_pointers[] = {&m, &c};
+  int parameter_sizes[] = {1, 1};
 
   ceres_problem_t* problem = ceres_create_problem();
   for (int i = 0; i < num_observations; ++i) {
@@ -162,16 +164,14 @@
   ceres_free_problem(problem);
 }
 
-template<typename T>
+template <typename T>
 class ScopedSetValue {
  public:
   ScopedSetValue(T* variable, T new_value)
       : variable_(variable), old_value_(*variable) {
     *variable = new_value;
   }
-  ~ScopedSetValue() {
-    *variable_ = old_value_;
-  }
+  ~ScopedSetValue() { *variable_ = old_value_; }
 
  private:
   T* variable_;
@@ -181,8 +181,8 @@
 TEST(C_API, LossFunctions) {
   double m = 0.2;
   double c = 0.03;
-  double *parameter_pointers[] = { &m, &c };
-  int parameter_sizes[] = { 1, 1 };
+  double* parameter_pointers[] = {&m, &c};
+  int parameter_sizes[] = {1, 1};
 
   // Create two outliers, but be careful to leave the data intact.
   ScopedSetValue<double> outlier1x(&data[12], 2.5);
@@ -191,19 +191,18 @@
   ScopedSetValue<double> outlier2y(&data[15], 30e3);
 
   // Create a cauchy cost function, and reuse it many times.
-  void* cauchy_loss_data =
-      ceres_create_cauchy_loss_function_data(5.0);
+  void* cauchy_loss_data = ceres_create_cauchy_loss_function_data(5.0);
 
   ceres_problem_t* problem = ceres_create_problem();
   for (int i = 0; i < num_observations; ++i) {
     ceres_problem_add_residual_block(
         problem,
-        exponential_residual,  // Cost function
-        &data[2 * i],          // Points to the (x,y) measurement
-        ceres_stock_loss_function,
-        cauchy_loss_data,      // Loss function user data
-        1,                     // Number of residuals
-        2,                     // Number of parameter blocks
+        exponential_residual,       // Cost function
+        &data[2 * i],               // Points to the (x,y) measurement
+        ceres_stock_loss_function,  //
+        cauchy_loss_data,           // Loss function user data
+        1,                          // Number of residuals
+        2,                          // Number of parameter blocks
         parameter_sizes,
         parameter_pointers);
   }
diff --git a/internal/ceres/callbacks.cc b/internal/ceres/callbacks.cc
index 84576e4..0e0df9d 100644
--- a/internal/ceres/callbacks.cc
+++ b/internal/ceres/callbacks.cc
@@ -28,8 +28,10 @@
 //
 // Author: sameeragarwal@google.com (Sameer Agarwal)
 
-#include <iostream>  // NO LINT
 #include "ceres/callbacks.h"
+
+#include <iostream>  // NO LINT
+
 #include "ceres/program.h"
 #include "ceres/stringprintf.h"
 #include "glog/logging.h"
@@ -76,8 +78,7 @@
 
 LoggingCallback::LoggingCallback(const MinimizerType minimizer_type,
                                  const bool log_to_stdout)
-    : minimizer_type(minimizer_type),
-      log_to_stdout_(log_to_stdout) {}
+    : minimizer_type(minimizer_type), log_to_stdout_(log_to_stdout) {}
 
 LoggingCallback::~LoggingCallback() {}
 
@@ -99,11 +100,13 @@
                           summary.iteration_time_in_seconds,
                           summary.cumulative_time_in_seconds);
   } else if (minimizer_type == TRUST_REGION) {
+    // clang-format off
     if (summary.iteration == 0) {
       output = "iter      cost      cost_change  |gradient|   |step|    tr_ratio  tr_radius  ls_iter  iter_time  total_time\n";  // NOLINT
     }
     const char* kReportRowFormat =
         "% 4d % 8e   % 3.2e   % 3.2e  % 3.2e  % 3.2e % 3.2e     % 4d   % 3.2e   % 3.2e";  // NOLINT
+    // clang-format on
     output += StringPrintf(kReportRowFormat,
                            summary.iteration,
                            summary.cost,
diff --git a/internal/ceres/callbacks.h b/internal/ceres/callbacks.h
index d68bf7f..47112b8 100644
--- a/internal/ceres/callbacks.h
+++ b/internal/ceres/callbacks.h
@@ -32,8 +32,9 @@
 #define CERES_INTERNAL_CALLBACKS_H_
 
 #include <string>
-#include "ceres/iteration_callback.h"
+
 #include "ceres/internal/port.h"
+#include "ceres/iteration_callback.h"
 
 namespace ceres {
 namespace internal {
@@ -47,6 +48,7 @@
   StateUpdatingCallback(Program* program, double* parameters);
   virtual ~StateUpdatingCallback();
   CallbackReturnType operator()(const IterationSummary& summary) final;
+
  private:
   Program* program_;
   double* parameters_;
@@ -61,6 +63,7 @@
                                              double* user_parameters);
   virtual ~GradientProblemSolverStateUpdatingCallback();
   CallbackReturnType operator()(const IterationSummary& summary) final;
+
  private:
   int num_parameters_;
   const double* internal_parameters_;
diff --git a/internal/ceres/canonical_views_clustering.cc b/internal/ceres/canonical_views_clustering.cc
index e927e1f..c193735 100644
--- a/internal/ceres/canonical_views_clustering.cc
+++ b/internal/ceres/canonical_views_clustering.cc
@@ -31,8 +31,8 @@
 
 #include "ceres/canonical_views_clustering.h"
 
-#include <unordered_set>
 #include <unordered_map>
+#include <unordered_set>
 
 #include "ceres/graph.h"
 #include "ceres/map_util.h"
@@ -126,8 +126,7 @@
 
     // Add canonical view if quality improves, or if minimum is not
     // yet met, otherwise break.
-    if ((best_difference <= 0) &&
-        (centers->size() >= options_.min_views)) {
+    if ((best_difference <= 0) && (centers->size() >= options_.min_views)) {
       break;
     }
 
@@ -141,8 +140,7 @@
 
 // Return the set of vertices of the graph which have valid vertex
 // weights.
-void CanonicalViewsClustering::FindValidViews(
-    IntSet* valid_views) const {
+void CanonicalViewsClustering::FindValidViews(IntSet* valid_views) const {
   const IntSet& views = graph_->vertices();
   for (const auto& view : views) {
     if (graph_->VertexWeight(view) != WeightedGraph<int>::InvalidWeight()) {
@@ -154,8 +152,7 @@
 // Computes the difference in the quality score if 'candidate' were
 // added to the set of canonical views.
 double CanonicalViewsClustering::ComputeClusteringQualityDifference(
-    const int candidate,
-    const vector<int>& centers) const {
+    const int candidate, const vector<int>& centers) const {
   // View score.
   double difference =
       options_.view_score_weight * graph_->VertexWeight(candidate);
@@ -179,7 +176,7 @@
   // Orthogonality.
   for (int i = 0; i < centers.size(); ++i) {
     difference -= options_.similarity_penalty_weight *
-        graph_->EdgeWeight(centers[i], candidate);
+                  graph_->EdgeWeight(centers[i], candidate);
   }
 
   return difference;
@@ -192,8 +189,7 @@
   for (const auto& neighbor : neighbors) {
     const double old_similarity =
         FindWithDefault(view_to_canonical_view_similarity_, neighbor, 0.0);
-    const double new_similarity =
-        graph_->EdgeWeight(neighbor, canonical_view);
+    const double new_similarity = graph_->EdgeWeight(neighbor, canonical_view);
     if (new_similarity > old_similarity) {
       view_to_canonical_view_[neighbor] = canonical_view;
       view_to_canonical_view_similarity_[neighbor] = new_similarity;
@@ -203,8 +199,7 @@
 
 // Assign a cluster id to each view.
 void CanonicalViewsClustering::ComputeClusterMembership(
-    const vector<int>& centers,
-    IntMap* membership) const {
+    const vector<int>& centers, IntMap* membership) const {
   CHECK(membership != nullptr);
   membership->clear();
 
diff --git a/internal/ceres/canonical_views_clustering_test.cc b/internal/ceres/canonical_views_clustering_test.cc
index 42e05bc..0593d65 100644
--- a/internal/ceres/canonical_views_clustering_test.cc
+++ b/internal/ceres/canonical_views_clustering_test.cc
@@ -32,6 +32,7 @@
 #include "ceres/canonical_views_clustering.h"
 
 #include <unordered_map>
+
 #include "ceres/graph.h"
 #include "gtest/gtest.h"
 
@@ -110,7 +111,6 @@
   EXPECT_EQ(centers_[0], kVertexIds[1]);
 }
 
-
 // Increases view score weight so vertex 2 will be chosen.
 TEST_F(CanonicalViewsTest, ViewScoreTest) {
   options_.min_views = 0;
diff --git a/internal/ceres/casts.h b/internal/ceres/casts.h
index f18fdea..d137071 100644
--- a/internal/ceres/casts.h
+++ b/internal/ceres/casts.h
@@ -56,15 +56,15 @@
 //
 // base::identity_ is used to make a non-deduced context, which
 // forces all callers to explicitly specify the template argument.
-template<typename To>
+template <typename To>
 inline To implicit_cast(typename identity_<To>::type to) {
   return to;
 }
 
 // This version of implicit_cast is used when two template arguments
 // are specified. It's obsolete and should not be used.
-template<typename To, typename From>
-inline To implicit_cast(typename identity_<From>::type const &f) {
+template <typename To, typename From>
+inline To implicit_cast(typename identity_<From>::type const& f) {
   return f;
 }
 
@@ -86,8 +86,8 @@
 //    if (dynamic_cast<Subclass2>(foo)) HandleASubclass2Object(foo);
 // You should design the code some other way not to need this.
 
-template<typename To, typename From>     // use like this: down_cast<T*>(foo);
-inline To down_cast(From* f) {                   // so we only accept pointers
+template <typename To, typename From>  // use like this: down_cast<T*>(foo);
+inline To down_cast(From* f) {         // so we only accept pointers
   // Ensures that To is a sub-type of From *.  This test is here only
   // for compile-time type checking, and has no overhead in an
   // optimized build at run-time, as it will be optimized away
diff --git a/internal/ceres/cgnr_linear_operator.h b/internal/ceres/cgnr_linear_operator.h
index 8e8febc..beb8bbc 100644
--- a/internal/ceres/cgnr_linear_operator.h
+++ b/internal/ceres/cgnr_linear_operator.h
@@ -33,8 +33,9 @@
 
 #include <algorithm>
 #include <memory>
-#include "ceres/linear_operator.h"
+
 #include "ceres/internal/eigen.h"
+#include "ceres/linear_operator.h"
 
 namespace ceres {
 namespace internal {
@@ -79,9 +80,8 @@
 // Note: This class is not thread safe, since it uses some temporary storage.
 class CgnrLinearOperator : public LinearOperator {
  public:
-  CgnrLinearOperator(const LinearOperator& A, const double *D)
-      : A_(A), D_(D), z_(new double[A.num_rows()]) {
-  }
+  CgnrLinearOperator(const LinearOperator& A, const double* D)
+      : A_(A), D_(D), z_(new double[A.num_rows()]) {}
   virtual ~CgnrLinearOperator() {}
 
   void RightMultiply(const double* x, double* y) const final {
@@ -96,8 +96,8 @@
     // y = y + DtDx
     if (D_ != NULL) {
       int n = A_.num_cols();
-      VectorRef(y, n).array() += ConstVectorRef(D_, n).array().square() *
-                                 ConstVectorRef(x, n).array();
+      VectorRef(y, n).array() +=
+          ConstVectorRef(D_, n).array().square() * ConstVectorRef(x, n).array();
     }
   }
 
diff --git a/internal/ceres/cgnr_solver.h b/internal/ceres/cgnr_solver.h
index 5292733..bc701c0 100644
--- a/internal/ceres/cgnr_solver.h
+++ b/internal/ceres/cgnr_solver.h
@@ -32,6 +32,7 @@
 #define CERES_INTERNAL_CGNR_SOLVER_H_
 
 #include <memory>
+
 #include "ceres/linear_solver.h"
 
 namespace ceres {
@@ -55,11 +56,10 @@
   void operator=(const CgnrSolver&) = delete;
   virtual ~CgnrSolver();
 
-  Summary SolveImpl(
-      BlockSparseMatrix* A,
-      const double* b,
-      const LinearSolver::PerSolveOptions& per_solve_options,
-      double* x) final;
+  Summary SolveImpl(BlockSparseMatrix* A,
+                    const double* b,
+                    const LinearSolver::PerSolveOptions& per_solve_options,
+                    double* x) final;
 
  private:
   const LinearSolver::Options options_;
diff --git a/internal/ceres/compressed_col_sparse_matrix_utils.cc b/internal/ceres/compressed_col_sparse_matrix_utils.cc
index 3f6672f..e1f6bb8 100644
--- a/internal/ceres/compressed_col_sparse_matrix_utils.cc
+++ b/internal/ceres/compressed_col_sparse_matrix_utils.cc
@@ -30,8 +30,9 @@
 
 #include "ceres/compressed_col_sparse_matrix_utils.h"
 
-#include <vector>
 #include <algorithm>
+#include <vector>
+
 #include "ceres/internal/port.h"
 #include "glog/logging.h"
 
@@ -40,13 +41,12 @@
 
 using std::vector;
 
-void CompressedColumnScalarMatrixToBlockMatrix(
-    const int* scalar_rows,
-    const int* scalar_cols,
-    const vector<int>& row_blocks,
-    const vector<int>& col_blocks,
-    vector<int>* block_rows,
-    vector<int>* block_cols) {
+void CompressedColumnScalarMatrixToBlockMatrix(const int* scalar_rows,
+                                               const int* scalar_cols,
+                                               const vector<int>& row_blocks,
+                                               const vector<int>& col_blocks,
+                                               vector<int>* block_rows,
+                                               vector<int>* block_cols) {
   CHECK(block_rows != nullptr);
   CHECK(block_cols != nullptr);
   block_rows->clear();
@@ -71,10 +71,8 @@
   for (int col_block = 0; col_block < num_col_blocks; ++col_block) {
     int column_size = 0;
     for (int idx = scalar_cols[c]; idx < scalar_cols[c + 1]; ++idx) {
-      vector<int>::const_iterator it =
-          std::lower_bound(row_block_starts.begin(),
-                           row_block_starts.end(),
-                           scalar_rows[idx]);
+      vector<int>::const_iterator it = std::lower_bound(
+          row_block_starts.begin(), row_block_starts.end(), scalar_rows[idx]);
       // Since we are using lower_bound, it will return the row id
       // where the row block starts. For everything but the first row
       // of the block, where these values will be the same, we can
@@ -104,7 +102,7 @@
 
   // block_starts = [0, block1, block1 + block2 ..]
   vector<int> block_starts(num_blocks);
-  for (int i = 0, cursor = 0; i < num_blocks ; ++i) {
+  for (int i = 0, cursor = 0; i < num_blocks; ++i) {
     block_starts[i] = cursor;
     cursor += blocks[i];
   }
diff --git a/internal/ceres/compressed_col_sparse_matrix_utils.h b/internal/ceres/compressed_col_sparse_matrix_utils.h
index da2109f..8e56519 100644
--- a/internal/ceres/compressed_col_sparse_matrix_utils.h
+++ b/internal/ceres/compressed_col_sparse_matrix_utils.h
@@ -32,6 +32,7 @@
 #define CERES_INTERNAL_COMPRESSED_COL_SPARSE_MATRIX_UTILS_H_
 
 #include <vector>
+
 #include "ceres/internal/port.h"
 
 namespace ceres {
@@ -58,10 +59,9 @@
 // Given a set of blocks and a permutation of these blocks, compute
 // the corresponding "scalar" ordering, where the scalar ordering of
 // size sum(blocks).
-void BlockOrderingToScalarOrdering(
-    const std::vector<int>& blocks,
-    const std::vector<int>& block_ordering,
-    std::vector<int>* scalar_ordering);
+void BlockOrderingToScalarOrdering(const std::vector<int>& blocks,
+                                   const std::vector<int>& block_ordering,
+                                   std::vector<int>* scalar_ordering);
 
 // Solve the linear system
 //
@@ -101,7 +101,7 @@
       const double v = values[idx];
       rhs_and_solution[c] -= v * rhs_and_solution[r];
     }
-    rhs_and_solution[c] =  rhs_and_solution[c] / values[cols[c + 1] - 1];
+    rhs_and_solution[c] = rhs_and_solution[c] / values[cols[c + 1] - 1];
   }
 }
 
@@ -132,7 +132,7 @@
       const double v = values[idx];
       solution[c] -= v * solution[r];
     }
-    solution[c] =  solution[c] / values[cols[c + 1] - 1];
+    solution[c] = solution[c] / values[cols[c + 1] - 1];
   }
 
   SolveUpperTriangularInPlace(num_cols, rows, cols, values, solution);
diff --git a/internal/ceres/compressed_col_sparse_matrix_utils_test.cc b/internal/ceres/compressed_col_sparse_matrix_utils_test.cc
index ba422a4..339c064 100644
--- a/internal/ceres/compressed_col_sparse_matrix_utils_test.cc
+++ b/internal/ceres/compressed_col_sparse_matrix_utils_test.cc
@@ -28,15 +28,16 @@
 //
 // Author: sameeragarwal@google.com (Sameer Agarwal)
 
+#include "ceres/compressed_col_sparse_matrix_utils.h"
 
 #include <algorithm>
 #include <numeric>
-#include "ceres/compressed_col_sparse_matrix_utils.h"
+
+#include "Eigen/SparseCore"
 #include "ceres/internal/port.h"
 #include "ceres/triplet_sparse_matrix.h"
 #include "glog/logging.h"
 #include "gtest/gtest.h"
-#include "Eigen/SparseCore"
 
 namespace ceres {
 namespace internal {
@@ -78,9 +79,7 @@
   expected_scalar_ordering.push_back(8);
 
   vector<int> scalar_ordering;
-  BlockOrderingToScalarOrdering(blocks,
-                                block_ordering,
-                                &scalar_ordering);
+  BlockOrderingToScalarOrdering(blocks, block_ordering, &scalar_ordering);
   EXPECT_EQ(scalar_ordering.size(), expected_scalar_ordering.size());
   for (int i = 0; i < expected_scalar_ordering.size(); ++i) {
     EXPECT_EQ(scalar_ordering[i], expected_scalar_ordering[i]);
@@ -92,11 +91,14 @@
                       const int row_block_id,
                       const int col_block_id,
                       vector<Eigen::Triplet<double>>* triplets) {
-  const int row_offset = std::accumulate(&row_blocks[0], &row_blocks[row_block_id], 0);
-  const int col_offset = std::accumulate(&col_blocks[0], &col_blocks[col_block_id], 0);
+  const int row_offset =
+      std::accumulate(&row_blocks[0], &row_blocks[row_block_id], 0);
+  const int col_offset =
+      std::accumulate(&col_blocks[0], &col_blocks[col_block_id], 0);
   for (int r = 0; r < row_blocks[row_block_id]; ++r) {
     for (int c = 0; c < col_blocks[col_block_id]; ++c) {
-      triplets->push_back(Eigen::Triplet<double>(row_offset + r, col_offset + c, 1.0));
+      triplets->push_back(
+          Eigen::Triplet<double>(row_offset + r, col_offset + c, 1.0));
     }
   }
 }
@@ -110,7 +112,6 @@
   // [2]  x x
   // num_nonzeros = 1 + 3 + 4 + 4 + 1 + 2 = 15
 
-
   vector<int> col_blocks;
   col_blocks.push_back(1);
   col_blocks.push_back(2);
@@ -122,8 +123,10 @@
   row_blocks.push_back(2);
   row_blocks.push_back(2);
 
-  const int num_rows = std::accumulate(row_blocks.begin(), row_blocks.end(), 0.0);
-  const int num_cols = std::accumulate(col_blocks.begin(), col_blocks.end(), 0.0);
+  const int num_rows =
+      std::accumulate(row_blocks.begin(), row_blocks.end(), 0.0);
+  const int num_cols =
+      std::accumulate(col_blocks.begin(), col_blocks.end(), 0.0);
 
   vector<Eigen::Triplet<double>> triplets;
   FillBlock(row_blocks, col_blocks, 0, 0, &triplets);
@@ -152,13 +155,12 @@
 
   vector<int> compressed_block_rows;
   vector<int> compressed_block_cols;
-  CompressedColumnScalarMatrixToBlockMatrix(
-      sparse_matrix.innerIndexPtr(),
-      sparse_matrix.outerIndexPtr(),
-      row_blocks,
-      col_blocks,
-      &compressed_block_rows,
-      &compressed_block_cols);
+  CompressedColumnScalarMatrixToBlockMatrix(sparse_matrix.innerIndexPtr(),
+                                            sparse_matrix.outerIndexPtr(),
+                                            row_blocks,
+                                            col_blocks,
+                                            &compressed_block_rows,
+                                            &compressed_block_cols);
 
   EXPECT_EQ(compressed_block_rows, expected_compressed_block_rows);
   EXPECT_EQ(compressed_block_cols, expected_compressed_block_cols);
@@ -203,13 +205,10 @@
 
 TEST_F(SolveUpperTriangularTest, SolveInPlace) {
   double rhs_and_solution[] = {1.0, 1.0, 2.0, 2.0};
-  const double expected[] = { -1.4706, -1.0962, 6.6667, 2.2477};
+  const double expected[] = {-1.4706, -1.0962, 6.6667, 2.2477};
 
-  SolveUpperTriangularInPlace<int>(cols.size() - 1,
-                                   &rows[0],
-                                   &cols[0],
-                                   &values[0],
-                                   rhs_and_solution);
+  SolveUpperTriangularInPlace<int>(
+      cols.size() - 1, &rows[0], &cols[0], &values[0], rhs_and_solution);
 
   for (int i = 0; i < 4; ++i) {
     EXPECT_NEAR(rhs_and_solution[i], expected[i], 1e-4) << i;
@@ -218,13 +217,10 @@
 
 TEST_F(SolveUpperTriangularTest, TransposeSolveInPlace) {
   double rhs_and_solution[] = {1.0, 1.0, 2.0, 2.0};
-  double expected[] = {1.970288,  1.242498,  6.081864, -0.057255};
+  double expected[] = {1.970288, 1.242498, 6.081864, -0.057255};
 
-  SolveUpperTriangularTransposeInPlace<int>(cols.size() - 1,
-                                            &rows[0],
-                                            &cols[0],
-                                            &values[0],
-                                            rhs_and_solution);
+  SolveUpperTriangularTransposeInPlace<int>(
+      cols.size() - 1, &rows[0], &cols[0], &values[0], rhs_and_solution);
 
   for (int i = 0; i < 4; ++i) {
     EXPECT_NEAR(rhs_and_solution[i], expected[i], 1e-4) << i;
@@ -233,18 +229,16 @@
 
 TEST_F(SolveUpperTriangularTest, RTRSolveWithSparseRHS) {
   double solution[4];
+  // clang-format off
   double expected[] = { 6.8420e+00,   1.0057e+00,  -1.4907e-16,  -1.9335e+00,
                         1.0057e+00,   2.2275e+00,  -1.9493e+00,  -6.5693e-01,
                         -1.4907e-16,  -1.9493e+00,   1.1111e+01,   9.7381e-17,
                         -1.9335e+00,  -6.5693e-01,   9.7381e-17,   1.2631e+00 };
+  // clang-format on
 
   for (int i = 0; i < 4; ++i) {
-    SolveRTRWithSparseRHS<int>(cols.size() - 1,
-                               &rows[0],
-                               &cols[0],
-                               &values[0],
-                               i,
-                               solution);
+    SolveRTRWithSparseRHS<int>(
+        cols.size() - 1, &rows[0], &cols[0], &values[0], i, solution);
     for (int j = 0; j < 4; ++j) {
       EXPECT_NEAR(solution[j], expected[4 * i + j], 1e-3) << i;
     }
diff --git a/internal/ceres/compressed_row_jacobian_writer.cc b/internal/ceres/compressed_row_jacobian_writer.cc
index 1fc0116..8e7e3e7 100644
--- a/internal/ceres/compressed_row_jacobian_writer.cc
+++ b/internal/ceres/compressed_row_jacobian_writer.cc
@@ -44,23 +44,21 @@
 namespace ceres {
 namespace internal {
 
+using std::adjacent_find;
 using std::make_pair;
 using std::pair;
 using std::vector;
-using std::adjacent_find;
 
 void CompressedRowJacobianWriter::PopulateJacobianRowAndColumnBlockVectors(
     const Program* program, CompressedRowSparseMatrix* jacobian) {
-  const vector<ParameterBlock*>& parameter_blocks =
-      program->parameter_blocks();
+  const vector<ParameterBlock*>& parameter_blocks = program->parameter_blocks();
   vector<int>& col_blocks = *(jacobian->mutable_col_blocks());
   col_blocks.resize(parameter_blocks.size());
   for (int i = 0; i < parameter_blocks.size(); ++i) {
     col_blocks[i] = parameter_blocks[i]->LocalSize();
   }
 
-  const vector<ResidualBlock*>& residual_blocks =
-      program->residual_blocks();
+  const vector<ResidualBlock*>& residual_blocks = program->residual_blocks();
   vector<int>& row_blocks = *(jacobian->mutable_row_blocks());
   row_blocks.resize(residual_blocks.size());
   for (int i = 0; i < residual_blocks.size(); ++i) {
@@ -69,11 +67,10 @@
 }
 
 void CompressedRowJacobianWriter::GetOrderedParameterBlocks(
-      const Program* program,
-      int residual_id,
-      vector<pair<int, int>>* evaluated_jacobian_blocks) {
-  const ResidualBlock* residual_block =
-      program->residual_blocks()[residual_id];
+    const Program* program,
+    int residual_id,
+    vector<pair<int, int>>* evaluated_jacobian_blocks) {
+  const ResidualBlock* residual_block = program->residual_blocks()[residual_id];
   const int num_parameter_blocks = residual_block->NumParameterBlocks();
 
   for (int j = 0; j < num_parameter_blocks; ++j) {
@@ -88,8 +85,7 @@
 }
 
 SparseMatrix* CompressedRowJacobianWriter::CreateJacobian() const {
-  const vector<ResidualBlock*>& residual_blocks =
-      program_->residual_blocks();
+  const vector<ResidualBlock*>& residual_blocks = program_->residual_blocks();
 
   int total_num_residuals = program_->NumResiduals();
   int total_num_effective_parameters = program_->NumEffectiveParameters();
@@ -112,11 +108,10 @@
   // Allocate more space than needed to store the jacobian so that when the LM
   // algorithm adds the diagonal, no reallocation is necessary. This reduces
   // peak memory usage significantly.
-  CompressedRowSparseMatrix* jacobian =
-      new CompressedRowSparseMatrix(
-          total_num_residuals,
-          total_num_effective_parameters,
-          num_jacobian_nonzeros + total_num_effective_parameters);
+  CompressedRowSparseMatrix* jacobian = new CompressedRowSparseMatrix(
+      total_num_residuals,
+      total_num_effective_parameters,
+      num_jacobian_nonzeros + total_num_effective_parameters);
 
   // At this stage, the CompressedRowSparseMatrix is an invalid state. But this
   // seems to be the only way to construct it without doing a memory copy.
@@ -148,8 +143,7 @@
       std::string parameter_block_description;
       for (int j = 0; j < num_parameter_blocks; ++j) {
         ParameterBlock* parameter_block = residual_block->parameter_blocks()[j];
-        parameter_block_description +=
-            parameter_block->ToString() + "\n";
+        parameter_block_description += parameter_block->ToString() + "\n";
       }
       LOG(FATAL) << "Ceres internal error: "
                  << "Duplicate parameter blocks detected in a cost function. "
@@ -196,7 +190,7 @@
 
 void CompressedRowJacobianWriter::Write(int residual_id,
                                         int residual_offset,
-                                        double **jacobians,
+                                        double** jacobians,
                                         SparseMatrix* base_jacobian) {
   CompressedRowSparseMatrix* jacobian =
       down_cast<CompressedRowSparseMatrix*>(base_jacobian);
diff --git a/internal/ceres/compressed_row_jacobian_writer.h b/internal/ceres/compressed_row_jacobian_writer.h
index 9fb414e..b1251ca 100644
--- a/internal/ceres/compressed_row_jacobian_writer.h
+++ b/internal/ceres/compressed_row_jacobian_writer.h
@@ -50,8 +50,7 @@
  public:
   CompressedRowJacobianWriter(Evaluator::Options /* ignored */,
                               Program* program)
-    : program_(program) {
-  }
+      : program_(program) {}
 
   // PopulateJacobianRowAndColumnBlockVectors sets col_blocks and
   // row_blocks for a CompressedRowSparseMatrix, based on the
@@ -64,8 +63,7 @@
   // (Jacobian writers do not fall under any type hierarchy; they only
   // have to provide an interface as specified in program_evaluator.h).
   static void PopulateJacobianRowAndColumnBlockVectors(
-      const Program* program,
-      CompressedRowSparseMatrix* jacobian);
+      const Program* program, CompressedRowSparseMatrix* jacobian);
 
   // It is necessary to determine the order of the jacobian blocks
   // before copying them into a CompressedRowSparseMatrix (or derived
@@ -99,7 +97,7 @@
 
   void Write(int residual_id,
              int residual_offset,
-             double **jacobians,
+             double** jacobians,
              SparseMatrix* base_jacobian);
 
  private:
diff --git a/internal/ceres/compressed_row_sparse_matrix.cc b/internal/ceres/compressed_row_sparse_matrix.cc
index e56de16..900586c 100644
--- a/internal/ceres/compressed_row_sparse_matrix.cc
+++ b/internal/ceres/compressed_row_sparse_matrix.cc
@@ -33,6 +33,7 @@
 #include <algorithm>
 #include <numeric>
 #include <vector>
+
 #include "ceres/crs_matrix.h"
 #include "ceres/internal/port.h"
 #include "ceres/random.h"
diff --git a/internal/ceres/compressed_row_sparse_matrix.h b/internal/ceres/compressed_row_sparse_matrix.h
index 758b40b..203c7ea 100644
--- a/internal/ceres/compressed_row_sparse_matrix.h
+++ b/internal/ceres/compressed_row_sparse_matrix.h
@@ -32,6 +32,7 @@
 #define CERES_INTERNAL_COMPRESSED_ROW_SPARSE_MATRIX_H_
 
 #include <vector>
+
 #include "ceres/internal/port.h"
 #include "ceres/sparse_matrix.h"
 #include "ceres/types.h"
diff --git a/internal/ceres/compressed_row_sparse_matrix_test.cc b/internal/ceres/compressed_row_sparse_matrix_test.cc
index 214f374..91f3ba4 100644
--- a/internal/ceres/compressed_row_sparse_matrix_test.cc
+++ b/internal/ceres/compressed_row_sparse_matrix_test.cc
@@ -32,6 +32,8 @@
 
 #include <memory>
 #include <numeric>
+
+#include "Eigen/SparseCore"
 #include "ceres/casts.h"
 #include "ceres/crs_matrix.h"
 #include "ceres/internal/eigen.h"
@@ -41,8 +43,6 @@
 #include "glog/logging.h"
 #include "gtest/gtest.h"
 
-#include "Eigen/SparseCore"
-
 namespace ceres {
 namespace internal {
 
@@ -445,11 +445,12 @@
                   0.0,
                   std::numeric_limits<double>::epsilon() * 10)
           << "\n"
-          << dense
-          << "x:\n"
+          << dense << "x:\n"
           << x.transpose() << "\n"
-          << "expected: \n" << expected_y.transpose() << "\n"
-          << "actual: \n" << actual_y.transpose();
+          << "expected: \n"
+          << expected_y.transpose() << "\n"
+          << "actual: \n"
+          << actual_y.transpose();
     }
   }
 }
@@ -513,11 +514,12 @@
                   0.0,
                   std::numeric_limits<double>::epsilon() * 10)
           << "\n"
-          << dense
-          << "x\n"
+          << dense << "x\n"
           << x.transpose() << "\n"
-          << "expected: \n" << expected_y.transpose() << "\n"
-          << "actual: \n" << actual_y.transpose();
+          << "expected: \n"
+          << expected_y.transpose() << "\n"
+          << "actual: \n"
+          << actual_y.transpose();
     }
   }
 }
@@ -579,9 +581,10 @@
                   0.0,
                   std::numeric_limits<double>::epsilon() * 10)
           << "\n"
-          << dense
-          << "expected: \n" << expected.transpose() << "\n"
-          << "actual: \n" << actual.transpose();
+          << dense << "expected: \n"
+          << expected.transpose() << "\n"
+          << "actual: \n"
+          << actual.transpose();
     }
   }
 }
@@ -594,7 +597,6 @@
                       CompressedRowSparseMatrix::UNSYMMETRIC),
     ParamInfoToString);
 
-
 // TODO(sameeragarwal) Add tests for the random matrix creation methods.
 
 }  // namespace internal
diff --git a/internal/ceres/concurrent_queue.h b/internal/ceres/concurrent_queue.h
index 52e2903..a04d147 100644
--- a/internal/ceres/concurrent_queue.h
+++ b/internal/ceres/concurrent_queue.h
@@ -152,7 +152,6 @@
   bool wait_;
 };
 
-
 }  // namespace internal
 }  // namespace ceres
 
diff --git a/internal/ceres/concurrent_queue_test.cc b/internal/ceres/concurrent_queue_test.cc
index ce6d687..430111a 100644
--- a/internal/ceres/concurrent_queue_test.cc
+++ b/internal/ceres/concurrent_queue_test.cc
@@ -37,7 +37,6 @@
 #include <thread>
 
 #include "ceres/concurrent_queue.h"
-
 #include "gmock/gmock.h"
 #include "gtest/gtest.h"
 
@@ -304,4 +303,4 @@
 }  // namespace internal
 }  // namespace ceres
 
-#endif // CERES_USE_CXX_THREADS
+#endif  // CERES_USE_CXX_THREADS
diff --git a/internal/ceres/conditioned_cost_function.cc b/internal/ceres/conditioned_cost_function.cc
index d933ad7..fb4c52a 100644
--- a/internal/ceres/conditioned_cost_function.cc
+++ b/internal/ceres/conditioned_cost_function.cc
@@ -68,7 +68,8 @@
 
 ConditionedCostFunction::~ConditionedCostFunction() {
   if (ownership_ == TAKE_OWNERSHIP) {
-    STLDeleteUniqueContainerPointers(conditioners_.begin(), conditioners_.end());
+    STLDeleteUniqueContainerPointers(conditioners_.begin(),
+                                     conditioners_.end());
   } else {
     wrapped_cost_function_.release();
   }
@@ -77,8 +78,8 @@
 bool ConditionedCostFunction::Evaluate(double const* const* parameters,
                                        double* residuals,
                                        double** jacobians) const {
-  bool success = wrapped_cost_function_->Evaluate(parameters, residuals,
-                                                  jacobians);
+  bool success =
+      wrapped_cost_function_->Evaluate(parameters, residuals, jacobians);
   if (!success) {
     return false;
   }
@@ -102,9 +103,8 @@
 
       double unconditioned_residual = residuals[r];
       double* parameter_pointer = &unconditioned_residual;
-      success = conditioners_[r]->Evaluate(&parameter_pointer,
-                                           &residuals[r],
-                                           conditioner_derivative_pointer2);
+      success = conditioners_[r]->Evaluate(
+          &parameter_pointer, &residuals[r], conditioner_derivative_pointer2);
       if (!success) {
         return false;
       }
@@ -117,7 +117,8 @@
             int parameter_block_size =
                 wrapped_cost_function_->parameter_block_sizes()[i];
             VectorRef jacobian_row(jacobians[i] + r * parameter_block_size,
-                                   parameter_block_size, 1);
+                                   parameter_block_size,
+                                   1);
             jacobian_row *= conditioner_derivative;
           }
         }
diff --git a/internal/ceres/conditioned_cost_function_test.cc b/internal/ceres/conditioned_cost_function_test.cc
index cd15507..f21f84c 100644
--- a/internal/ceres/conditioned_cost_function_test.cc
+++ b/internal/ceres/conditioned_cost_function_test.cc
@@ -125,7 +125,8 @@
   // Make a cost function that computes x - v2
   double v2[kTestCostFunctionSize];
   VectorRef v2_vector(v2, kTestCostFunctionSize, 1);
-  Matrix identity = Matrix::Identity(kTestCostFunctionSize, kTestCostFunctionSize);
+  Matrix identity =
+      Matrix::Identity(kTestCostFunctionSize, kTestCostFunctionSize);
   NormalPrior* difference_cost_function = new NormalPrior(identity, v2_vector);
   CostFunction* conditioner = new LinearCostFunction(2, 7);
   std::vector<CostFunction*> conditioners;
diff --git a/internal/ceres/conjugate_gradients_solver.cc b/internal/ceres/conjugate_gradients_solver.cc
index c6f85c1..3019628 100644
--- a/internal/ceres/conjugate_gradients_solver.cc
+++ b/internal/ceres/conjugate_gradients_solver.cc
@@ -41,6 +41,7 @@
 
 #include <cmath>
 #include <cstddef>
+
 #include "ceres/internal/eigen.h"
 #include "ceres/linear_operator.h"
 #include "ceres/stringprintf.h"
@@ -51,16 +52,13 @@
 namespace internal {
 namespace {
 
-bool IsZeroOrInfinity(double x) {
-  return ((x == 0.0) || std::isinf(x));
-}
+bool IsZeroOrInfinity(double x) { return ((x == 0.0) || std::isinf(x)); }
 
 }  // namespace
 
 ConjugateGradientsSolver::ConjugateGradientsSolver(
     const LinearSolver::Options& options)
-    : options_(options) {
-}
+    : options_(options) {}
 
 LinearSolver::Summary ConjugateGradientsSolver::Solve(
     LinearOperator* A,
@@ -137,7 +135,10 @@
         summary.termination_type = LINEAR_SOLVER_FAILURE;
         summary.message = StringPrintf(
             "Numerical failure. beta = rho_n / rho_{n-1} = %e, "
-            "rho_n = %e, rho_{n-1} = %e", beta, rho, last_rho);
+            "rho_n = %e, rho_{n-1} = %e",
+            beta,
+            rho,
+            last_rho);
         break;
       }
       p = z + beta * p;
@@ -152,16 +153,20 @@
       summary.message = StringPrintf(
           "Matrix is indefinite, no more progress can be made. "
           "p'q = %e. |p| = %e, |q| = %e",
-          pq, p.norm(), q.norm());
+          pq,
+          p.norm(),
+          q.norm());
       break;
     }
 
     const double alpha = rho / pq;
     if (std::isinf(alpha)) {
       summary.termination_type = LINEAR_SOLVER_FAILURE;
-      summary.message =
-          StringPrintf("Numerical failure. alpha = rho / pq = %e, "
-                       "rho = %e, pq = %e.", alpha, rho, pq);
+      summary.message = StringPrintf(
+          "Numerical failure. alpha = rho / pq = %e, rho = %e, pq = %e.",
+          alpha,
+          rho,
+          pq);
       break;
     }
 
@@ -223,7 +228,7 @@
     Q0 = Q1;
 
     // Residual based termination.
-    norm_r = r. norm();
+    norm_r = r.norm();
     if (norm_r <= tol_r &&
         summary.num_iterations >= options_.min_num_iterations) {
       summary.termination_type = LINEAR_SOLVER_SUCCESS;
diff --git a/internal/ceres/conjugate_gradients_solver_test.cc b/internal/ceres/conjugate_gradients_solver_test.cc
index 9311998..b11e522 100644
--- a/internal/ceres/conjugate_gradients_solver_test.cc
+++ b/internal/ceres/conjugate_gradients_solver_test.cc
@@ -31,21 +31,23 @@
 // TODO(sameeragarwal): More comprehensive testing with larger and
 // more badly conditioned problem.
 
-#include <memory>
-#include "gtest/gtest.h"
 #include "ceres/conjugate_gradients_solver.h"
+
+#include <memory>
+
+#include "ceres/internal/eigen.h"
 #include "ceres/linear_solver.h"
 #include "ceres/triplet_sparse_matrix.h"
-#include "ceres/internal/eigen.h"
 #include "ceres/types.h"
+#include "gtest/gtest.h"
 
 namespace ceres {
 namespace internal {
 
 TEST(ConjugateGradientTest, Solves3x3IdentitySystem) {
-  double diagonal[] = { 1.0, 1.0, 1.0 };
-  std::unique_ptr<TripletSparseMatrix>
-      A(TripletSparseMatrix::CreateSparseDiagonalMatrix(diagonal, 3));
+  double diagonal[] = {1.0, 1.0, 1.0};
+  std::unique_ptr<TripletSparseMatrix> A(
+      TripletSparseMatrix::CreateSparseDiagonalMatrix(diagonal, 3));
   Vector b(3);
   Vector x(3);
 
@@ -75,7 +77,6 @@
   ASSERT_DOUBLE_EQ(3, x(2));
 }
 
-
 TEST(ConjuateGradientTest, Solves3x3SymmetricSystem) {
   std::unique_ptr<TripletSparseMatrix> A(new TripletSparseMatrix(3, 3, 9));
   Vector b(3);
diff --git a/internal/ceres/context.cc b/internal/ceres/context.cc
index e223201..55e7635 100644
--- a/internal/ceres/context.cc
+++ b/internal/ceres/context.cc
@@ -34,8 +34,6 @@
 
 namespace ceres {
 
-Context* Context::Create() {
-  return new internal::ContextImpl();
-}
+Context* Context::Create() { return new internal::ContextImpl(); }
 
 }  // namespace ceres
diff --git a/internal/ceres/context_impl.cc b/internal/ceres/context_impl.cc
index 622f33a..20fe5cb 100644
--- a/internal/ceres/context_impl.cc
+++ b/internal/ceres/context_impl.cc
@@ -37,7 +37,6 @@
 #ifdef CERES_USE_CXX_THREADS
   thread_pool.Resize(num_threads);
 #endif  // CERES_USE_CXX_THREADS
-
 }
 }  // namespace internal
 }  // namespace ceres
diff --git a/internal/ceres/context_impl.h b/internal/ceres/context_impl.h
index 5c03ad7..90ba344 100644
--- a/internal/ceres/context_impl.h
+++ b/internal/ceres/context_impl.h
@@ -32,7 +32,9 @@
 #define CERES_INTERNAL_CONTEXT_IMPL_H_
 
 // This include must come before any #ifndef check on Ceres compile options.
+// clang-format off
 #include "ceres/internal/port.h"
+// clang-format on
 
 #include "ceres/context.h"
 
diff --git a/internal/ceres/coordinate_descent_minimizer.cc b/internal/ceres/coordinate_descent_minimizer.cc
index c5d56f3..93096ac 100644
--- a/internal/ceres/coordinate_descent_minimizer.cc
+++ b/internal/ceres/coordinate_descent_minimizer.cc
@@ -64,8 +64,7 @@
   CHECK(context_ != nullptr);
 }
 
-CoordinateDescentMinimizer::~CoordinateDescentMinimizer() {
-}
+CoordinateDescentMinimizer::~CoordinateDescentMinimizer() {}
 
 bool CoordinateDescentMinimizer::Init(
     const Program& program,
@@ -82,13 +81,13 @@
   map<int, set<double*>> group_to_elements = ordering.group_to_elements();
   for (const auto& g_t_e : group_to_elements) {
     const auto& elements = g_t_e.second;
-    for (double* parameter_block: elements) {
+    for (double* parameter_block : elements) {
       parameter_blocks_.push_back(parameter_map.find(parameter_block)->second);
       parameter_block_index[parameter_blocks_.back()] =
           parameter_blocks_.size() - 1;
     }
-    independent_set_offsets_.push_back(
-        independent_set_offsets_.back() + elements.size());
+    independent_set_offsets_.push_back(independent_set_offsets_.back() +
+                                       elements.size());
   }
 
   // The ordering does not have to contain all parameter blocks, so
@@ -126,10 +125,9 @@
   return true;
 }
 
-void CoordinateDescentMinimizer::Minimize(
-    const Minimizer::Options& options,
-    double* parameters,
-    Solver::Summary* summary) {
+void CoordinateDescentMinimizer::Minimize(const Minimizer::Options& options,
+                                          double* parameters,
+                                          Solver::Summary* summary) {
   // Set the state and mark all parameter blocks constant.
   for (int i = 0; i < parameter_blocks_.size(); ++i) {
     ParameterBlock* parameter_block = parameter_blocks_[i];
@@ -202,7 +200,7 @@
         });
   }
 
-  for (int i =  0; i < parameter_blocks_.size(); ++i) {
+  for (int i = 0; i < parameter_blocks_.size(); ++i) {
     parameter_blocks_[i]->SetVarying();
   }
 
@@ -251,10 +249,10 @@
   // Verify that each group is an independent set
   for (const auto& g_t_e : group_to_elements) {
     if (!program.IsParameterBlockSetIndependent(g_t_e.second)) {
-      *message =
-          StringPrintf("The user-provided "
-                       "parameter_blocks_for_inner_iterations does not "
-                       "form an independent set. Group Id: %d", g_t_e.first);
+      *message = StringPrintf(
+          "The user-provided parameter_blocks_for_inner_iterations does not "
+          "form an independent set. Group Id: %d",
+          g_t_e.first);
       return false;
     }
   }
diff --git a/internal/ceres/corrector.cc b/internal/ceres/corrector.cc
index 4ac0dc3..6a79a06 100644
--- a/internal/ceres/corrector.cc
+++ b/internal/ceres/corrector.cc
@@ -30,8 +30,9 @@
 
 #include "ceres/corrector.h"
 
-#include <cstddef>
 #include <cmath>
+#include <cstddef>
+
 #include "ceres/internal/eigen.h"
 #include "glog/logging.h"
 
@@ -147,9 +148,9 @@
     }
 
     for (int r = 0; r < num_rows; ++r) {
-      jacobian[r * num_cols + c] = sqrt_rho1_ *
-          (jacobian[r * num_cols + c] -
-           alpha_sq_norm_ * residuals[r] * r_transpose_j);
+      jacobian[r * num_cols + c] =
+          sqrt_rho1_ * (jacobian[r * num_cols + c] -
+                        alpha_sq_norm_ * residuals[r] * r_transpose_j);
     }
   }
 }
diff --git a/internal/ceres/corrector_test.cc b/internal/ceres/corrector_test.cc
index a6581fd..951041e 100644
--- a/internal/ceres/corrector_test.cc
+++ b/internal/ceres/corrector_test.cc
@@ -32,11 +32,12 @@
 
 #include <algorithm>
 #include <cmath>
-#include <cstring>
 #include <cstdlib>
-#include "gtest/gtest.h"
-#include "ceres/random.h"
+#include <cstring>
+
 #include "ceres/internal/eigen.h"
+#include "ceres/random.h"
+#include "gtest/gtest.h"
 
 namespace ceres {
 namespace internal {
@@ -44,15 +45,13 @@
 // If rho[1] is zero, the Corrector constructor should crash.
 TEST(Corrector, ZeroGradientDeathTest) {
   const double kRho[] = {0.0, 0.0, 1.0};
-  EXPECT_DEATH_IF_SUPPORTED({Corrector c(1.0, kRho);},
-               ".*");
+  EXPECT_DEATH_IF_SUPPORTED({ Corrector c(1.0, kRho); }, ".*");
 }
 
 // If rho[1] is negative, the Corrector constructor should crash.
 TEST(Corrector, NegativeGradientDeathTest) {
   const double kRho[] = {0.0, -0.1, 1.0};
-  EXPECT_DEATH_IF_SUPPORTED({Corrector c(1.0, kRho);},
-               ".*");
+  EXPECT_DEATH_IF_SUPPORTED({ Corrector c(1.0, kRho); }, ".*");
 }
 
 TEST(Corrector, ScalarCorrection) {
@@ -68,8 +67,7 @@
 
   // Thus the expected value of the residual is
   // residual[i] * sqrt(kRho[1]) / (1.0 - kAlpha).
-  const double kExpectedResidual =
-      residuals * sqrt(kRho[1]) / (1 - kAlpha);
+  const double kExpectedResidual = residuals * sqrt(kRho[1]) / (1 - kAlpha);
 
   // The jacobian in this case will be
   // sqrt(kRho[1]) * (1 - kAlpha) * jacobian.
@@ -123,13 +121,11 @@
 
   // Thus the expected value of the residual is
   // residual[i] * sqrt(kRho[1]) / (1.0 - kAlpha).
-  const double kExpectedResidual =
-      residuals * sqrt(kRho[1]) / (1.0 - kAlpha);
+  const double kExpectedResidual = residuals * sqrt(kRho[1]) / (1.0 - kAlpha);
 
   // The jacobian in this case will be scaled by
   // sqrt(rho[1]) * (1 - alpha) * J.
-  const double kExpectedJacobian = sqrt(kRho[1]) *
-      (1.0 - kAlpha) * jacobian;
+  const double kExpectedJacobian = sqrt(kRho[1]) * (1.0 - kAlpha) * jacobian;
 
   Corrector c(sq_norm, kRho);
   c.CorrectJacobian(1, 1, &residuals, &jacobian);
@@ -168,10 +164,8 @@
   srand(5);
   for (int iter = 0; iter < 10000; ++iter) {
     // Initialize the jacobian and residual.
-    for (int i = 0; i < 2 * 3; ++i)
-      jacobian[i] = RandDouble();
-    for (int i = 0; i < 3; ++i)
-      residuals[i] = RandDouble();
+    for (int i = 0; i < 2 * 3; ++i) jacobian[i] = RandDouble();
+    for (int i = 0; i < 3; ++i) residuals[i] = RandDouble();
 
     const double sq_norm = res.dot(res);
 
@@ -188,19 +182,19 @@
 
     // Ground truth values.
     g_res = sqrt(rho[1]) / (1.0 - kAlpha) * res;
-    g_jac = sqrt(rho[1]) * (jac - kAlpha / sq_norm *
-                            res * res.transpose() * jac);
+    g_jac =
+        sqrt(rho[1]) * (jac - kAlpha / sq_norm * res * res.transpose() * jac);
 
     g_grad = rho[1] * jac.transpose() * res;
     g_hess = rho[1] * jac.transpose() * jac +
-        2.0 * rho[2] * jac.transpose() * res * res.transpose() * jac;
+             2.0 * rho[2] * jac.transpose() * res * res.transpose() * jac;
 
     Corrector c(sq_norm, rho);
     c.CorrectJacobian(3, 2, residuals, jacobian);
     c.CorrectResiduals(3, residuals);
 
     // Corrected gradient and hessian.
-    c_grad  = jac.transpose() * res;
+    c_grad = jac.transpose() * res;
     c_hess = jac.transpose() * jac;
 
     ASSERT_NEAR((g_res - res).norm(), 0.0, 1e-10);
@@ -236,8 +230,7 @@
   srand(5);
   for (int iter = 0; iter < 10000; ++iter) {
     // Initialize the jacobian.
-    for (int i = 0; i < 2 * 3; ++i)
-      jacobian[i] = RandDouble();
+    for (int i = 0; i < 2 * 3; ++i) jacobian[i] = RandDouble();
 
     // Zero residuals
     res.setZero();
@@ -254,7 +247,7 @@
 
     g_grad = rho[1] * jac.transpose() * res;
     g_hess = rho[1] * jac.transpose() * jac +
-        2.0 * rho[2] * jac.transpose() * res * res.transpose() * jac;
+             2.0 * rho[2] * jac.transpose() * res * res.transpose() * jac;
 
     Corrector c(sq_norm, rho);
     c.CorrectJacobian(3, 2, residuals, jacobian);
diff --git a/internal/ceres/cost_function_to_functor_test.cc b/internal/ceres/cost_function_to_functor_test.cc
index 0a6d86c..11f47e3 100644
--- a/internal/ceres/cost_function_to_functor_test.cc
+++ b/internal/ceres/cost_function_to_functor_test.cc
@@ -32,9 +32,10 @@
 
 #include <cstdint>
 #include <memory>
+
+#include "ceres/autodiff_cost_function.h"
 #include "ceres/dynamic_autodiff_cost_function.h"
 #include "ceres/dynamic_cost_function_to_functor.h"
-#include "ceres/autodiff_cost_function.h"
 #include "gtest/gtest.h"
 
 namespace ceres {
@@ -53,8 +54,7 @@
       cost_function.parameter_block_sizes();
   const vector<int32_t>& actual_parameter_block_sizes =
       actual_cost_function.parameter_block_sizes();
-  EXPECT_EQ(parameter_block_sizes.size(),
-            actual_parameter_block_sizes.size());
+  EXPECT_EQ(parameter_block_sizes.size(), actual_parameter_block_sizes.size());
 
   int num_parameters = 0;
   for (int i = 0; i < parameter_block_sizes.size(); ++i) {
@@ -68,11 +68,12 @@
   }
 
   std::unique_ptr<double[]> residuals(new double[num_residuals]);
-  std::unique_ptr<double[]> jacobians(new double[num_parameters * num_residuals]);
+  std::unique_ptr<double[]> jacobians(
+      new double[num_parameters * num_residuals]);
 
   std::unique_ptr<double[]> actual_residuals(new double[num_residuals]);
-  std::unique_ptr<double[]> actual_jacobians
-      (new double[num_parameters * num_residuals]);
+  std::unique_ptr<double[]> actual_jacobians(
+      new double[num_parameters * num_residuals]);
 
   std::unique_ptr<double*[]> parameter_blocks(
       new double*[parameter_block_sizes.size()]);
@@ -90,19 +91,17 @@
     num_parameters += parameter_block_sizes[i];
   }
 
-  EXPECT_TRUE(cost_function.Evaluate(parameter_blocks.get(),
-                                     residuals.get(), NULL));
-  EXPECT_TRUE(actual_cost_function.Evaluate(parameter_blocks.get(),
-                                            actual_residuals.get(), NULL));
+  EXPECT_TRUE(
+      cost_function.Evaluate(parameter_blocks.get(), residuals.get(), NULL));
+  EXPECT_TRUE(actual_cost_function.Evaluate(
+      parameter_blocks.get(), actual_residuals.get(), NULL));
   for (int i = 0; i < num_residuals; ++i) {
     EXPECT_NEAR(residuals[i], actual_residuals[i], kTolerance)
         << "residual id: " << i;
   }
 
-
-  EXPECT_TRUE(cost_function.Evaluate(parameter_blocks.get(),
-                                     residuals.get(),
-                                     jacobian_blocks.get()));
+  EXPECT_TRUE(cost_function.Evaluate(
+      parameter_blocks.get(), residuals.get(), jacobian_blocks.get()));
   EXPECT_TRUE(actual_cost_function.Evaluate(parameter_blocks.get(),
                                             actual_residuals.get(),
                                             actual_jacobian_blocks.get()));
@@ -113,8 +112,8 @@
 
   for (int i = 0; i < num_residuals * num_parameters; ++i) {
     EXPECT_NEAR(jacobians[i], actual_jacobians[i], kTolerance)
-        << "jacobian : " << i << " "
-        << jacobians[i] << " " << actual_jacobians[i];
+        << "jacobian : " << i << " " << jacobians[i] << " "
+        << actual_jacobians[i];
   }
 }
 
@@ -132,8 +131,8 @@
  public:
   template <typename T>
   bool operator()(const T* x1, const T* x2, T* residuals) const {
-    residuals[0] = x1[0] * x1[0]  + x2[0] * x2[0];
-    residuals[1] = x1[1] * x1[1]  + x2[1] * x2[1];
+    residuals[0] = x1[0] * x1[0] + x2[0] * x2[0];
+    residuals[1] = x1[1] * x1[1] + x2[1] * x2[1];
     return true;
   }
 };
@@ -142,8 +141,8 @@
  public:
   template <typename T>
   bool operator()(const T* x1, const T* x2, const T* x3, T* residuals) const {
-    residuals[0] = x1[0] * x1[0]  + x2[0] * x2[0] + x3[0] * x3[0];
-    residuals[1] = x1[1] * x1[1]  + x2[1] * x2[1] + x3[1] * x3[1];
+    residuals[0] = x1[0] * x1[0] + x2[0] * x2[0] + x3[0] * x3[0];
+    residuals[1] = x1[1] * x1[1] + x2[1] * x2[1] + x3[1] * x3[1];
     return true;
   }
 };
@@ -151,12 +150,12 @@
 struct FourParameterBlockFunctor {
  public:
   template <typename T>
-  bool operator()(const T* x1, const T* x2, const T* x3, const T* x4,
-                  T* residuals) const {
-    residuals[0] = x1[0] * x1[0]  + x2[0] * x2[0] + x3[0] * x3[0]
-        + x4[0] * x4[0];
-    residuals[1] = x1[1] * x1[1]  + x2[1] * x2[1] + x3[1] * x3[1]
-        + x4[1] * x4[1];
+  bool operator()(
+      const T* x1, const T* x2, const T* x3, const T* x4, T* residuals) const {
+    residuals[0] =
+        x1[0] * x1[0] + x2[0] * x2[0] + x3[0] * x3[0] + x4[0] * x4[0];
+    residuals[1] =
+        x1[1] * x1[1] + x2[1] * x2[1] + x3[1] * x3[1] + x4[1] * x4[1];
     return true;
   }
 };
@@ -164,12 +163,16 @@
 struct FiveParameterBlockFunctor {
  public:
   template <typename T>
-  bool operator()(const T* x1, const T* x2, const T* x3, const T* x4,
-                  const T* x5, T* residuals) const {
-    residuals[0] = x1[0] * x1[0]  + x2[0] * x2[0] + x3[0] * x3[0]
-        + x4[0] * x4[0] + x5[0] * x5[0];
-    residuals[1] = x1[1] * x1[1]  + x2[1] * x2[1] + x3[1] * x3[1]
-        + x4[1] * x4[1] + x5[1] * x5[1];
+  bool operator()(const T* x1,
+                  const T* x2,
+                  const T* x3,
+                  const T* x4,
+                  const T* x5,
+                  T* residuals) const {
+    residuals[0] = x1[0] * x1[0] + x2[0] * x2[0] + x3[0] * x3[0] +
+                   x4[0] * x4[0] + x5[0] * x5[0];
+    residuals[1] = x1[1] * x1[1] + x2[1] * x2[1] + x3[1] * x3[1] +
+                   x4[1] * x4[1] + x5[1] * x5[1];
     return true;
   }
 };
@@ -177,12 +180,17 @@
 struct SixParameterBlockFunctor {
  public:
   template <typename T>
-  bool operator()(const T* x1, const T* x2, const T* x3, const T* x4,
-                  const T* x5, const T* x6,  T* residuals) const {
-    residuals[0] = x1[0] * x1[0]  + x2[0] * x2[0] + x3[0] * x3[0]
-        + x4[0] * x4[0] + x5[0] * x5[0] + x6[0] * x6[0];
-    residuals[1] = x1[1] * x1[1]  + x2[1] * x2[1] + x3[1] * x3[1]
-        + x4[1] * x4[1] + x5[1] * x5[1] + x6[1] * x6[1];
+  bool operator()(const T* x1,
+                  const T* x2,
+                  const T* x3,
+                  const T* x4,
+                  const T* x5,
+                  const T* x6,
+                  T* residuals) const {
+    residuals[0] = x1[0] * x1[0] + x2[0] * x2[0] + x3[0] * x3[0] +
+                   x4[0] * x4[0] + x5[0] * x5[0] + x6[0] * x6[0];
+    residuals[1] = x1[1] * x1[1] + x2[1] * x2[1] + x3[1] * x3[1] +
+                   x4[1] * x4[1] + x5[1] * x5[1] + x6[1] * x6[1];
     return true;
   }
 };
@@ -190,12 +198,20 @@
 struct SevenParameterBlockFunctor {
  public:
   template <typename T>
-  bool operator()(const T* x1, const T* x2, const T* x3, const T* x4,
-                  const T* x5, const T* x6, const T* x7, T* residuals) const {
-    residuals[0] = x1[0] * x1[0]  + x2[0] * x2[0] + x3[0] * x3[0]
-        + x4[0] * x4[0] + x5[0] * x5[0] + x6[0] * x6[0] + x7[0] * x7[0];
-    residuals[1] = x1[1] * x1[1]  + x2[1] * x2[1] + x3[1] * x3[1]
-        + x4[1] * x4[1] + x5[1] * x5[1] + x6[1] * x6[1] + x7[1] * x7[1];
+  bool operator()(const T* x1,
+                  const T* x2,
+                  const T* x3,
+                  const T* x4,
+                  const T* x5,
+                  const T* x6,
+                  const T* x7,
+                  T* residuals) const {
+    residuals[0] = x1[0] * x1[0] + x2[0] * x2[0] + x3[0] * x3[0] +
+                   x4[0] * x4[0] + x5[0] * x5[0] + x6[0] * x6[0] +
+                   x7[0] * x7[0];
+    residuals[1] = x1[1] * x1[1] + x2[1] * x2[1] + x3[1] * x3[1] +
+                   x4[1] * x4[1] + x5[1] * x5[1] + x6[1] * x6[1] +
+                   x7[1] * x7[1];
     return true;
   }
 };
@@ -203,15 +219,21 @@
 struct EightParameterBlockFunctor {
  public:
   template <typename T>
-  bool operator()(const T* x1, const T* x2, const T* x3, const T* x4,
-                  const T* x5, const T* x6, const T* x7, const T* x8,
+  bool operator()(const T* x1,
+                  const T* x2,
+                  const T* x3,
+                  const T* x4,
+                  const T* x5,
+                  const T* x6,
+                  const T* x7,
+                  const T* x8,
                   T* residuals) const {
-    residuals[0] = x1[0] * x1[0]  + x2[0] * x2[0] + x3[0] * x3[0]
-        + x4[0] * x4[0] + x5[0] * x5[0] + x6[0] * x6[0] + x7[0] * x7[0]
-        + x8[0] * x8[0];
-    residuals[1] = x1[1] * x1[1]  + x2[1] * x2[1] + x3[1] * x3[1]
-        + x4[1] * x4[1] + x5[1] * x5[1] + x6[1] * x6[1] + x7[1] * x7[1]
-        + x8[1] * x8[1];
+    residuals[0] = x1[0] * x1[0] + x2[0] * x2[0] + x3[0] * x3[0] +
+                   x4[0] * x4[0] + x5[0] * x5[0] + x6[0] * x6[0] +
+                   x7[0] * x7[0] + x8[0] * x8[0];
+    residuals[1] = x1[1] * x1[1] + x2[1] * x2[1] + x3[1] * x3[1] +
+                   x4[1] * x4[1] + x5[1] * x5[1] + x6[1] * x6[1] +
+                   x7[1] * x7[1] + x8[1] * x8[1];
     return true;
   }
 };
@@ -219,15 +241,22 @@
 struct NineParameterBlockFunctor {
  public:
   template <typename T>
-  bool operator()(const T* x1, const T* x2, const T* x3, const T* x4,
-                  const T* x5, const T* x6, const T* x7, const T* x8,
-                  const T* x9, T* residuals) const {
-    residuals[0] = x1[0] * x1[0]  + x2[0] * x2[0] + x3[0] * x3[0]
-        + x4[0] * x4[0] + x5[0] * x5[0] + x6[0] * x6[0] + x7[0] * x7[0]
-        + x8[0] * x8[0] + x9[0] * x9[0];
-    residuals[1] = x1[1] * x1[1]  + x2[1] * x2[1] + x3[1] * x3[1]
-        + x4[1] * x4[1] + x5[1] * x5[1] + x6[1] * x6[1] + x7[1] * x7[1]
-        + x8[1] * x8[1] + x9[1] * x9[1];
+  bool operator()(const T* x1,
+                  const T* x2,
+                  const T* x3,
+                  const T* x4,
+                  const T* x5,
+                  const T* x6,
+                  const T* x7,
+                  const T* x8,
+                  const T* x9,
+                  T* residuals) const {
+    residuals[0] = x1[0] * x1[0] + x2[0] * x2[0] + x3[0] * x3[0] +
+                   x4[0] * x4[0] + x5[0] * x5[0] + x6[0] * x6[0] +
+                   x7[0] * x7[0] + x8[0] * x8[0] + x9[0] * x9[0];
+    residuals[1] = x1[1] * x1[1] + x2[1] * x2[1] + x3[1] * x3[1] +
+                   x4[1] * x4[1] + x5[1] * x5[1] + x6[1] * x6[1] +
+                   x7[1] * x7[1] + x8[1] * x8[1] + x9[1] * x9[1];
     return true;
   }
 };
@@ -235,15 +264,25 @@
 struct TenParameterBlockFunctor {
  public:
   template <typename T>
-  bool operator()(const T* x1, const T* x2, const T* x3, const T* x4,
-                  const T* x5, const T* x6, const T* x7, const T* x8,
-                  const T* x9, const T* x10, T* residuals) const {
-    residuals[0] = x1[0] * x1[0]  + x2[0] * x2[0] + x3[0] * x3[0]
-        + x4[0] * x4[0] + x5[0] * x5[0] + x6[0] * x6[0] + x7[0] * x7[0]
-        + x8[0] * x8[0] + x9[0] * x9[0] + x10[0] * x10[0];
-    residuals[1] = x1[1] * x1[1]  + x2[1] * x2[1] + x3[1] * x3[1]
-        + x4[1] * x4[1] + x5[1] * x5[1] + x6[1] * x6[1] + x7[1] * x7[1]
-        + x8[1] * x8[1] + x9[1] * x9[1] + x10[1] * x10[1];
+  bool operator()(const T* x1,
+                  const T* x2,
+                  const T* x3,
+                  const T* x4,
+                  const T* x5,
+                  const T* x6,
+                  const T* x7,
+                  const T* x8,
+                  const T* x9,
+                  const T* x10,
+                  T* residuals) const {
+    residuals[0] = x1[0] * x1[0] + x2[0] * x2[0] + x3[0] * x3[0] +
+                   x4[0] * x4[0] + x5[0] * x5[0] + x6[0] * x6[0] +
+                   x7[0] * x7[0] + x8[0] * x8[0] + x9[0] * x9[0] +
+                   x10[0] * x10[0];
+    residuals[1] = x1[1] * x1[1] + x2[1] * x2[1] + x3[1] * x3[1] +
+                   x4[1] * x4[1] + x5[1] * x5[1] + x6[1] * x6[1] +
+                   x7[1] * x7[1] + x8[1] * x8[1] + x9[1] * x9[1] +
+                   x10[1] * x10[1];
     return true;
   }
 };
@@ -281,39 +320,39 @@
 TEST_BODY(OneParameterBlockFunctor)
 #undef PARAMETER_BLOCK_SIZES
 
-#define PARAMETER_BLOCK_SIZES 2,2
+#define PARAMETER_BLOCK_SIZES 2, 2
 TEST_BODY(TwoParameterBlockFunctor)
 #undef PARAMETER_BLOCK_SIZES
 
-#define PARAMETER_BLOCK_SIZES 2,2,2
+#define PARAMETER_BLOCK_SIZES 2, 2, 2
 TEST_BODY(ThreeParameterBlockFunctor)
 #undef PARAMETER_BLOCK_SIZES
 
-#define PARAMETER_BLOCK_SIZES 2,2,2,2
+#define PARAMETER_BLOCK_SIZES 2, 2, 2, 2
 TEST_BODY(FourParameterBlockFunctor)
 #undef PARAMETER_BLOCK_SIZES
 
-#define PARAMETER_BLOCK_SIZES 2,2,2,2,2
+#define PARAMETER_BLOCK_SIZES 2, 2, 2, 2, 2
 TEST_BODY(FiveParameterBlockFunctor)
 #undef PARAMETER_BLOCK_SIZES
 
-#define PARAMETER_BLOCK_SIZES 2,2,2,2,2,2
+#define PARAMETER_BLOCK_SIZES 2, 2, 2, 2, 2, 2
 TEST_BODY(SixParameterBlockFunctor)
 #undef PARAMETER_BLOCK_SIZES
 
-#define PARAMETER_BLOCK_SIZES 2,2,2,2,2,2,2
+#define PARAMETER_BLOCK_SIZES 2, 2, 2, 2, 2, 2, 2
 TEST_BODY(SevenParameterBlockFunctor)
 #undef PARAMETER_BLOCK_SIZES
 
-#define PARAMETER_BLOCK_SIZES 2,2,2,2,2,2,2,2
+#define PARAMETER_BLOCK_SIZES 2, 2, 2, 2, 2, 2, 2, 2
 TEST_BODY(EightParameterBlockFunctor)
 #undef PARAMETER_BLOCK_SIZES
 
-#define PARAMETER_BLOCK_SIZES 2,2,2,2,2,2,2,2,2
+#define PARAMETER_BLOCK_SIZES 2, 2, 2, 2, 2, 2, 2, 2, 2
 TEST_BODY(NineParameterBlockFunctor)
 #undef PARAMETER_BLOCK_SIZES
 
-#define PARAMETER_BLOCK_SIZES 2,2,2,2,2,2,2,2,2,2
+#define PARAMETER_BLOCK_SIZES 2, 2, 2, 2, 2, 2, 2, 2, 2, 2
 TEST_BODY(TenParameterBlockFunctor)
 #undef PARAMETER_BLOCK_SIZES
 
@@ -321,14 +360,17 @@
 
 TEST(CostFunctionToFunctor, DynamicNumberOfResiduals) {
   std::unique_ptr<CostFunction> cost_function(
-      new AutoDiffCostFunction<
-      CostFunctionToFunctor<ceres::DYNAMIC, 2, 2 >, ceres::DYNAMIC, 2, 2>(
-          new CostFunctionToFunctor<ceres::DYNAMIC, 2, 2 >(
-              new AutoDiffCostFunction<TwoParameterBlockFunctor, 2, 2, 2 >(
-                  new TwoParameterBlockFunctor)), 2));
+      new AutoDiffCostFunction<CostFunctionToFunctor<ceres::DYNAMIC, 2, 2>,
+                               ceres::DYNAMIC,
+                               2,
+                               2>(
+          new CostFunctionToFunctor<ceres::DYNAMIC, 2, 2>(
+              new AutoDiffCostFunction<TwoParameterBlockFunctor, 2, 2, 2>(
+                  new TwoParameterBlockFunctor)),
+          2));
 
   std::unique_ptr<CostFunction> actual_cost_function(
-      new AutoDiffCostFunction<TwoParameterBlockFunctor, 2, 2, 2 >(
+      new AutoDiffCostFunction<TwoParameterBlockFunctor, 2, 2, 2>(
           new TwoParameterBlockFunctor));
   ExpectCostFunctionsAreEqual(*cost_function, *actual_cost_function);
 }
@@ -336,8 +378,8 @@
 TEST(CostFunctionToFunctor, DynamicCostFunctionToFunctor) {
   DynamicAutoDiffCostFunction<DynamicTwoParameterBlockFunctor>*
       actual_cost_function(
-      new DynamicAutoDiffCostFunction<DynamicTwoParameterBlockFunctor>(
-          new DynamicTwoParameterBlockFunctor));
+          new DynamicAutoDiffCostFunction<DynamicTwoParameterBlockFunctor>(
+              new DynamicTwoParameterBlockFunctor));
   actual_cost_function->AddParameterBlock(2);
   actual_cost_function->AddParameterBlock(2);
   actual_cost_function->SetNumResiduals(2);
diff --git a/internal/ceres/covariance.cc b/internal/ceres/covariance.cc
index 8256078..8e240ff 100644
--- a/internal/ceres/covariance.cc
+++ b/internal/ceres/covariance.cc
@@ -32,6 +32,7 @@
 
 #include <utility>
 #include <vector>
+
 #include "ceres/covariance_impl.h"
 #include "ceres/problem.h"
 #include "ceres/problem_impl.h"
@@ -46,8 +47,7 @@
   impl_.reset(new internal::CovarianceImpl(options));
 }
 
-Covariance::~Covariance() {
-}
+Covariance::~Covariance() {}
 
 bool Covariance::Compute(
     const vector<pair<const double*, const double*>>& covariance_blocks,
@@ -55,9 +55,8 @@
   return impl_->Compute(covariance_blocks, problem->impl_.get());
 }
 
-bool Covariance::Compute(
-    const vector<const double*>& parameter_blocks,
-    Problem* problem) {
+bool Covariance::Compute(const vector<const double*>& parameter_blocks,
+                         Problem* problem) {
   return impl_->Compute(parameter_blocks, problem->impl_.get());
 }
 
@@ -89,8 +88,8 @@
 }
 
 bool Covariance::GetCovarianceMatrixInTangentSpace(
-    const std::vector<const double *>& parameter_blocks,
-    double *covariance_matrix) const {
+    const std::vector<const double*>& parameter_blocks,
+    double* covariance_matrix) const {
   return impl_->GetCovarianceMatrixInTangentOrAmbientSpace(parameter_blocks,
                                                            false,  // tangent
                                                            covariance_matrix);
diff --git a/internal/ceres/covariance_test.cc b/internal/ceres/covariance_test.cc
index 34a36c6..229173f 100644
--- a/internal/ceres/covariance_test.cc
+++ b/internal/ceres/covariance_test.cc
@@ -31,8 +31,8 @@
 #include "ceres/covariance.h"
 
 #include <algorithm>
-#include <cstdint>
 #include <cmath>
+#include <cstdint>
 #include <map>
 #include <memory>
 #include <utility>
@@ -54,7 +54,7 @@
 using std::pair;
 using std::vector;
 
-class UnaryCostFunction: public CostFunction {
+class UnaryCostFunction : public CostFunction {
  public:
   UnaryCostFunction(const int num_residuals,
                     const int32_t parameter_block_size,
@@ -86,8 +86,7 @@
   vector<double> jacobian_;
 };
 
-
-class BinaryCostFunction: public CostFunction {
+class BinaryCostFunction : public CostFunction {
  public:
   BinaryCostFunction(const int num_residuals,
                      const int32_t parameter_block1_size,
@@ -193,6 +192,7 @@
   //  . . . . . . X X X X
   //  . . . . . . X X X X
 
+  // clang-format off
   int expected_rows[] = {0, 5, 10, 15, 18, 21, 24, 28, 32, 36, 40};
   int expected_cols[] = {0, 6, 7, 8, 9,
                          1, 2, 3, 4, 5,
@@ -204,7 +204,7 @@
                          6, 7, 8, 9,
                          6, 7, 8, 9,
                          6, 7, 8, 9};
-
+  // clang-format on
 
   vector<pair<const double*, const double*>> covariance_blocks;
   covariance_blocks.push_back(make_pair(block1, block1));
@@ -216,8 +216,8 @@
 
   Covariance::Options options;
   CovarianceImpl covariance_impl(options);
-  EXPECT_TRUE(covariance_impl
-              .ComputeCovarianceSparsity(covariance_blocks, &problem));
+  EXPECT_TRUE(
+      covariance_impl.ComputeCovarianceSparsity(covariance_blocks, &problem));
 
   const CompressedRowSparseMatrix* crsm = covariance_impl.covariance_matrix();
 
@@ -228,17 +228,13 @@
   const int* rows = crsm->rows();
   for (int r = 0; r < crsm->num_rows() + 1; ++r) {
     EXPECT_EQ(rows[r], expected_rows[r])
-        << r << " "
-        << rows[r] << " "
-        << expected_rows[r];
+        << r << " " << rows[r] << " " << expected_rows[r];
   }
 
   const int* cols = crsm->cols();
   for (int c = 0; c < crsm->num_nonzeros(); ++c) {
     EXPECT_EQ(cols[c], expected_cols[c])
-        << c << " "
-        << cols[c] << " "
-        << expected_cols[c];
+        << c << " " << cols[c] << " " << expected_cols[c];
   }
 }
 
@@ -280,6 +276,7 @@
   //  . . . X X X X
   //  . . . X X X X
 
+  // clang-format off
   int expected_rows[] = {0, 5, 7, 9, 13, 17, 21, 25};
   int expected_cols[] = {0, 3, 4, 5, 6,
                          1, 2,
@@ -288,6 +285,7 @@
                          3, 4, 5, 6,
                          3, 4, 5, 6,
                          3, 4, 5, 6};
+  // clang-format on
 
   vector<pair<const double*, const double*>> covariance_blocks;
   covariance_blocks.push_back(make_pair(block1, block1));
@@ -299,8 +297,8 @@
 
   Covariance::Options options;
   CovarianceImpl covariance_impl(options);
-  EXPECT_TRUE(covariance_impl
-              .ComputeCovarianceSparsity(covariance_blocks, &problem));
+  EXPECT_TRUE(
+      covariance_impl.ComputeCovarianceSparsity(covariance_blocks, &problem));
 
   const CompressedRowSparseMatrix* crsm = covariance_impl.covariance_matrix();
 
@@ -311,17 +309,13 @@
   const int* rows = crsm->rows();
   for (int r = 0; r < crsm->num_rows() + 1; ++r) {
     EXPECT_EQ(rows[r], expected_rows[r])
-        << r << " "
-        << rows[r] << " "
-        << expected_rows[r];
+        << r << " " << rows[r] << " " << expected_rows[r];
   }
 
   const int* cols = crsm->cols();
   for (int c = 0; c < crsm->num_nonzeros(); ++c) {
     EXPECT_EQ(cols[c], expected_cols[c])
-        << c << " "
-        << cols[c] << " "
-        << expected_cols[c];
+        << c << " " << cols[c] << " " << expected_cols[c];
   }
 }
 
@@ -361,6 +355,7 @@
   //  . . . X X X X
   //  . . . X X X X
 
+  // clang-format off
   int expected_rows[] = {0, 5, 7, 9, 13, 17, 21, 25};
   int expected_cols[] = {0, 3, 4, 5, 6,
                          1, 2,
@@ -369,6 +364,7 @@
                          3, 4, 5, 6,
                          3, 4, 5, 6,
                          3, 4, 5, 6};
+  // clang-format on
 
   vector<pair<const double*, const double*>> covariance_blocks;
   covariance_blocks.push_back(make_pair(block1, block1));
@@ -380,8 +376,8 @@
 
   Covariance::Options options;
   CovarianceImpl covariance_impl(options);
-  EXPECT_TRUE(covariance_impl
-              .ComputeCovarianceSparsity(covariance_blocks, &problem));
+  EXPECT_TRUE(
+      covariance_impl.ComputeCovarianceSparsity(covariance_blocks, &problem));
 
   const CompressedRowSparseMatrix* crsm = covariance_impl.covariance_matrix();
 
@@ -392,17 +388,13 @@
   const int* rows = crsm->rows();
   for (int r = 0; r < crsm->num_rows() + 1; ++r) {
     EXPECT_EQ(rows[r], expected_rows[r])
-        << r << " "
-        << rows[r] << " "
-        << expected_rows[r];
+        << r << " " << rows[r] << " " << expected_rows[r];
   }
 
   const int* cols = crsm->cols();
   for (int c = 0; c < crsm->num_nonzeros(); ++c) {
     EXPECT_EQ(cols[c], expected_cols[c])
-        << c << " "
-        << cols[c] << " "
-        << expected_cols[c];
+        << c << " " << cols[c] << " " << expected_cols[c];
   }
 }
 
@@ -423,40 +415,33 @@
     z[0] = 3;
 
     {
-      double jacobian[] = { 1.0, 0.0, 0.0, 1.0};
+      double jacobian[] = {1.0, 0.0, 0.0, 1.0};
       problem_.AddResidualBlock(new UnaryCostFunction(2, 2, jacobian), NULL, x);
     }
 
     {
-      double jacobian[] = { 2.0, 0.0, 0.0, 0.0, 2.0, 0.0, 0.0, 0.0, 2.0 };
+      double jacobian[] = {2.0, 0.0, 0.0, 0.0, 2.0, 0.0, 0.0, 0.0, 2.0};
       problem_.AddResidualBlock(new UnaryCostFunction(3, 3, jacobian), NULL, y);
     }
 
     {
       double jacobian = 5.0;
-      problem_.AddResidualBlock(new UnaryCostFunction(1, 1, &jacobian),
-                                NULL,
-                                z);
+      problem_.AddResidualBlock(
+          new UnaryCostFunction(1, 1, &jacobian), NULL, z);
     }
 
     {
-      double jacobian1[] = { 1.0, 2.0, 3.0 };
-      double jacobian2[] = { -5.0, -6.0 };
+      double jacobian1[] = {1.0, 2.0, 3.0};
+      double jacobian2[] = {-5.0, -6.0};
       problem_.AddResidualBlock(
-          new BinaryCostFunction(1, 3, 2, jacobian1, jacobian2),
-          NULL,
-          y,
-          x);
+          new BinaryCostFunction(1, 3, 2, jacobian1, jacobian2), NULL, y, x);
     }
 
     {
-      double jacobian1[] = {2.0 };
-      double jacobian2[] = { 3.0, -2.0 };
+      double jacobian1[] = {2.0};
+      double jacobian2[] = {3.0, -2.0};
       problem_.AddResidualBlock(
-          new BinaryCostFunction(1, 1, 2, jacobian1, jacobian2),
-          NULL,
-          z,
-          x);
+          new BinaryCostFunction(1, 1, 2, jacobian1, jacobian2), NULL, z, x);
     }
 
     all_covariance_blocks_.push_back(make_pair(x, x));
@@ -482,8 +467,7 @@
 
   // Computes covariance in tangent space.
   void ComputeAndCompareCovarianceBlocksInTangentSpace(
-                                         const Covariance::Options& options,
-                                         const double* expected_covariance) {
+      const Covariance::Options& options, const double* expected_covariance) {
     ComputeAndCompareCovarianceBlocksInTangentOrAmbientSpace(
         options,
         false,  // tangent
@@ -549,8 +533,9 @@
                                     bool lift_covariance_to_ambient_space,
                                     const Covariance& covariance,
                                     const double* expected_covariance) {
-    const BoundsMap& column_bounds = lift_covariance_to_ambient_space ?
-        column_bounds_ : local_column_bounds_;
+    const BoundsMap& column_bounds = lift_covariance_to_ambient_space
+                                         ? column_bounds_
+                                         : local_column_bounds_;
     const int row_begin = FindOrDie(column_bounds, block1).first;
     const int row_end = FindOrDie(column_bounds, block1).second;
     const int col_begin = FindOrDie(column_bounds, block2).first;
@@ -558,13 +543,10 @@
 
     Matrix actual(row_end - row_begin, col_end - col_begin);
     if (lift_covariance_to_ambient_space) {
-      EXPECT_TRUE(covariance.GetCovarianceBlock(block1,
-                                                block2,
-                                                actual.data()));
+      EXPECT_TRUE(covariance.GetCovarianceBlock(block1, block2, actual.data()));
     } else {
-      EXPECT_TRUE(covariance.GetCovarianceBlockInTangentSpace(block1,
-                                                              block2,
-                                                              actual.data()));
+      EXPECT_TRUE(covariance.GetCovarianceBlockInTangentSpace(
+          block1, block2, actual.data()));
     }
 
     int dof = 0;  // degrees of freedom = sum of LocalSize()s
@@ -572,22 +554,22 @@
       dof = std::max(dof, bound.second.second);
     }
     ConstMatrixRef expected(expected_covariance, dof, dof);
-    double diff_norm = (expected.block(row_begin,
-                                       col_begin,
-                                       row_end - row_begin,
-                                       col_end - col_begin) - actual).norm();
+    double diff_norm =
+        (expected.block(
+             row_begin, col_begin, row_end - row_begin, col_end - col_begin) -
+         actual)
+            .norm();
     diff_norm /= (row_end - row_begin) * (col_end - col_begin);
 
     const double kTolerance = 1e-5;
     EXPECT_NEAR(diff_norm, 0.0, kTolerance)
         << "rows: " << row_begin << " " << row_end << "  "
         << "cols: " << col_begin << " " << col_end << "  "
-        << "\n\n expected: \n " << expected.block(row_begin,
-                                                  col_begin,
-                                                  row_end - row_begin,
-                                                  col_end - col_begin)
-        << "\n\n actual: \n " << actual
-        << "\n\n full expected: \n" << expected;
+        << "\n\n expected: \n "
+        << expected.block(
+               row_begin, col_begin, row_end - row_begin, col_end - col_begin)
+        << "\n\n actual: \n " << actual << "\n\n full expected: \n"
+        << expected;
   }
 
   double parameters_[6];
@@ -597,7 +579,6 @@
   BoundsMap local_column_bounds_;
 };
 
-
 TEST_F(CovarianceTest, NormalBehavior) {
   // J
   //
@@ -620,6 +601,7 @@
   //    6  -4  0   0   0 29
 
   // inv(J'J) computed using octave.
+  // clang-format off
   double expected_covariance[] = {
      7.0747e-02,  -8.4923e-03,   1.6821e-02,   3.3643e-02,   5.0464e-02,  -1.5809e-02,  // NOLINT
     -8.4923e-03,   8.1352e-02,   2.4758e-02,   4.9517e-02,   7.4275e-02,   1.2978e-02,  // NOLINT
@@ -628,6 +610,7 @@
      5.0464e-02,   7.4275e-02,  -2.8906e-03,  -5.7813e-03,   2.4133e-01,  -1.9598e-04,  // NOLINT
     -1.5809e-02,   1.2978e-02,  -6.5325e-05,  -1.3065e-04,  -1.9598e-04,   3.9544e-02,  // NOLINT
   };
+  // clang-format on
 
   Covariance::Options options;
 
@@ -669,6 +652,7 @@
   //    6  -4  0   0   0 29
 
   // inv(J'J) computed using octave.
+  // clang-format off
   double expected_covariance[] = {
      7.0747e-02,  -8.4923e-03,   1.6821e-02,   3.3643e-02,   5.0464e-02,  -1.5809e-02,  // NOLINT
     -8.4923e-03,   8.1352e-02,   2.4758e-02,   4.9517e-02,   7.4275e-02,   1.2978e-02,  // NOLINT
@@ -677,6 +661,7 @@
      5.0464e-02,   7.4275e-02,  -2.8906e-03,  -5.7813e-03,   2.4133e-01,  -1.9598e-04,  // NOLINT
     -1.5809e-02,   1.2978e-02,  -6.5325e-05,  -1.3065e-04,  -1.9598e-04,   3.9544e-02,  // NOLINT
   };
+  // clang-format on
 
   Covariance::Options options;
   options.num_threads = 4;
@@ -721,6 +706,7 @@
   //  0  0  0  0  0 29
 
   // pinv(J'J) computed using octave.
+  // clang-format off
   double expected_covariance[] = {
               0,            0,            0,            0,            0,            0,  // NOLINT
               0,            0,            0,            0,            0,            0,  // NOLINT
@@ -728,6 +714,7 @@
               0,            0,     -0.02778,      0.19444,     -0.08333,     -0.00000,  // NOLINT
               0,            0,     -0.04167,     -0.08333,      0.12500,     -0.00000,  // NOLINT
               0,            0,     -0.00000,     -0.00000,     -0.00000,      0.03448   // NOLINT
+      // clang-format on
   };
 
   Covariance::Options options;
@@ -778,6 +765,7 @@
 
   // A * inv((J*A)'*(J*A)) * A'
   // Computed using octave.
+  // clang-format off
   double expected_covariance[] = {
     0.01766,   0.01766,   0.02158,   0.04316,   0.00000,  -0.00122,
     0.01766,   0.01766,   0.02158,   0.04316,   0.00000,  -0.00122,
@@ -786,6 +774,7 @@
     0.00000,   0.00000,   0.00000,   0.00000,   0.00000,   0.00000,
    -0.00122,  -0.00122,  -0.00149,  -0.00298,   0.00000,   0.03457
   };
+  // clang-format on
 
   Covariance::Options options;
 
@@ -840,12 +829,14 @@
 
   // inv((J*A)'*(J*A))
   // Computed using octave.
+  // clang-format off
   double expected_covariance[] = {
     0.01766,   0.02158,   0.04316,   -0.00122,
     0.02158,   0.24860,  -0.00281,   -0.00149,
     0.04316,  -0.00281,   0.24439,   -0.00298,
    -0.00122,  -0.00149,  -0.00298,    0.03457  // NOLINT
   };
+  // clang-format on
 
   Covariance::Options options;
 
@@ -903,12 +894,14 @@
 
   // pinv((J*A)'*(J*A))
   // Computed using octave.
+  // clang-format off
   double expected_covariance[] = {
     0.0, 0.0, 0.0, 0.0,
     0.0, 0.0, 0.0, 0.0,
     0.0, 0.0, 0.0, 0.0,
     0.0, 0.0, 0.0, 0.034482 // NOLINT
   };
+  // clang-format on
 
   Covariance::Options options;
 
@@ -951,6 +944,7 @@
   // 3.4142 is the smallest eigen value of J'J. The following matrix
   // was obtained by dropping the eigenvector corresponding to this
   // eigenvalue.
+  // clang-format off
   double expected_covariance[] = {
      5.4135e-02,  -3.5121e-02,   1.7257e-04,   3.4514e-04,   5.1771e-04,  -1.6076e-02,  // NOLINT
     -3.5121e-02,   3.8667e-02,  -1.9288e-03,  -3.8576e-03,  -5.7864e-03,   1.2549e-02,  // NOLINT
@@ -959,7 +953,7 @@
      5.1771e-04,  -5.7864e-03,  -5.2946e-02,  -1.0589e-01,   9.1162e-02,  -9.9988e-04,  // NOLINT
     -1.6076e-02,   1.2549e-02,  -3.3329e-04,  -6.6659e-04,  -9.9988e-04,   3.9539e-02   // NOLINT
   };
-
+  // clang-format on
 
   {
     Covariance::Options options;
@@ -1109,40 +1103,33 @@
     double* z = y + 3;
 
     {
-      double jacobian[] = { 1.0, 0.0, 0.0, 1.0};
+      double jacobian[] = {1.0, 0.0, 0.0, 1.0};
       problem_.AddResidualBlock(new UnaryCostFunction(2, 2, jacobian), NULL, x);
     }
 
     {
-      double jacobian[] = { 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0 };
+      double jacobian[] = {0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0};
       problem_.AddResidualBlock(new UnaryCostFunction(3, 3, jacobian), NULL, y);
     }
 
     {
       double jacobian = 5.0;
-      problem_.AddResidualBlock(new UnaryCostFunction(1, 1, &jacobian),
-                                NULL,
-                                z);
+      problem_.AddResidualBlock(
+          new UnaryCostFunction(1, 1, &jacobian), NULL, z);
     }
 
     {
-      double jacobian1[] = { 0.0, 0.0, 0.0 };
-      double jacobian2[] = { -5.0, -6.0 };
+      double jacobian1[] = {0.0, 0.0, 0.0};
+      double jacobian2[] = {-5.0, -6.0};
       problem_.AddResidualBlock(
-          new BinaryCostFunction(1, 3, 2, jacobian1, jacobian2),
-          NULL,
-          y,
-          x);
+          new BinaryCostFunction(1, 3, 2, jacobian1, jacobian2), NULL, y, x);
     }
 
     {
-      double jacobian1[] = {2.0 };
-      double jacobian2[] = { 3.0, -2.0 };
+      double jacobian1[] = {2.0};
+      double jacobian2[] = {3.0, -2.0};
       problem_.AddResidualBlock(
-          new BinaryCostFunction(1, 1, 2, jacobian1, jacobian2),
-          NULL,
-          z,
-          x);
+          new BinaryCostFunction(1, 1, 2, jacobian1, jacobian2), NULL, z, x);
     }
 
     all_covariance_blocks_.push_back(make_pair(x, x));
@@ -1180,6 +1167,7 @@
   //   6 -4  0  0  0 29
 
   // pinv(J'J) computed using octave.
+  // clang-format off
   double expected_covariance[] = {
      0.053998,  -0.033145,   0.000000,   0.000000,   0.000000,  -0.015744,
     -0.033145,   0.045067,   0.000000,   0.000000,   0.000000,   0.013074,
@@ -1188,6 +1176,7 @@
      0.000000,   0.000000,   0.000000,   0.000000,   0.000000,   0.000000,
     -0.015744,   0.013074,   0.000000,   0.000000,   0.000000,   0.039543
   };
+  // clang-format on
 
   Covariance::Options options;
   options.algorithm_type = DENSE_SVD;
@@ -1290,11 +1279,11 @@
       jacobian *= (i + 1);
 
       double* block_i = parameters_.get() + i * parameter_block_size_;
-      problem_.AddResidualBlock(new UnaryCostFunction(parameter_block_size_,
-                                                      parameter_block_size_,
-                                                      jacobian.data()),
-                                NULL,
-                                block_i);
+      problem_.AddResidualBlock(
+          new UnaryCostFunction(
+              parameter_block_size_, parameter_block_size_, jacobian.data()),
+          NULL,
+          block_i);
       for (int j = i; j < num_parameter_blocks_; ++j) {
         double* block_j = parameters_.get() + j * parameter_block_size_;
         all_covariance_blocks_.push_back(make_pair(block_i, block_j));
@@ -1326,8 +1315,10 @@
       covariance.GetCovarianceBlock(block_i, block_i, actual.data());
       EXPECT_NEAR((expected - actual).norm(), 0.0, kTolerance)
           << "block: " << i << ", " << i << "\n"
-          << "expected: \n" << expected << "\n"
-          << "actual: \n" << actual;
+          << "expected: \n"
+          << expected << "\n"
+          << "actual: \n"
+          << actual;
 
       expected.setZero();
       for (int j = i + 1; j < num_parameter_blocks_; ++j) {
@@ -1335,8 +1326,10 @@
         covariance.GetCovarianceBlock(block_i, block_j, actual.data());
         EXPECT_NEAR((expected - actual).norm(), 0.0, kTolerance)
             << "block: " << i << ", " << j << "\n"
-            << "expected: \n" << expected << "\n"
-            << "actual: \n" << actual;
+            << "expected: \n"
+            << expected << "\n"
+            << "actual: \n"
+            << actual;
       }
     }
   }
diff --git a/internal/ceres/cubic_interpolation_test.cc b/internal/ceres/cubic_interpolation_test.cc
index 4cf27ff..3907d22 100644
--- a/internal/ceres/cubic_interpolation_test.cc
+++ b/internal/ceres/cubic_interpolation_test.cc
@@ -31,6 +31,7 @@
 #include "ceres/cubic_interpolation.h"
 
 #include <memory>
+
 #include "ceres/jet.h"
 #include "glog/logging.h"
 #include "gtest/gtest.h"
@@ -65,9 +66,11 @@
 }
 
 TEST(Grid1D, TwoDataDimensionIntegerDataInterleaved) {
+  // clang-format off
   int x[] = {1, 5,
              2, 6,
              3, 7};
+  // clang-format on
 
   Grid1D<int, 2, true> grid(x, 0, 3);
   for (int i = 0; i < 3; ++i) {
@@ -78,10 +81,11 @@
   }
 }
 
-
 TEST(Grid1D, TwoDataDimensionIntegerDataStacked) {
+  // clang-format off
   int x[] = {1, 2, 3,
              5, 6, 7};
+  // clang-format on
 
   Grid1D<int, 2, false> grid(x, 0, 3);
   for (int i = 0; i < 3; ++i) {
@@ -93,8 +97,10 @@
 }
 
 TEST(Grid2D, OneDataDimensionRowMajor) {
+  // clang-format off
   int x[] = {1, 2, 3,
              2, 3, 4};
+  // clang-format on
   Grid2D<int, 1, true, true> grid(x, 0, 2, 0, 3);
   for (int r = 0; r < 2; ++r) {
     for (int c = 0; c < 3; ++c) {
@@ -106,8 +112,10 @@
 }
 
 TEST(Grid2D, OneDataDimensionRowMajorOutOfBounds) {
+  // clang-format off
   int x[] = {1, 2, 3,
              2, 3, 4};
+  // clang-format on
   Grid2D<int, 1, true, true> grid(x, 0, 2, 0, 3);
   double value;
   grid.GetValue(-1, -1, &value);
@@ -141,64 +149,72 @@
 }
 
 TEST(Grid2D, TwoDataDimensionRowMajorInterleaved) {
+  // clang-format off
   int x[] = {1, 4, 2, 8, 3, 12,
              2, 8, 3, 12, 4, 16};
+  // clang-format on
   Grid2D<int, 2, true, true> grid(x, 0, 2, 0, 3);
   for (int r = 0; r < 2; ++r) {
     for (int c = 0; c < 3; ++c) {
       double value[2];
       grid.GetValue(r, c, value);
       EXPECT_EQ(value[0], static_cast<double>(r + c + 1));
-      EXPECT_EQ(value[1], static_cast<double>(4 *(r + c + 1)));
+      EXPECT_EQ(value[1], static_cast<double>(4 * (r + c + 1)));
     }
   }
 }
 
 TEST(Grid2D, TwoDataDimensionRowMajorStacked) {
+  // clang-format off
   int x[] = {1,  2,  3,
              2,  3,  4,
              4,  8, 12,
              8, 12, 16};
+  // clang-format on
   Grid2D<int, 2, true, false> grid(x, 0, 2, 0, 3);
   for (int r = 0; r < 2; ++r) {
     for (int c = 0; c < 3; ++c) {
       double value[2];
       grid.GetValue(r, c, value);
       EXPECT_EQ(value[0], static_cast<double>(r + c + 1));
-      EXPECT_EQ(value[1], static_cast<double>(4 *(r + c + 1)));
+      EXPECT_EQ(value[1], static_cast<double>(4 * (r + c + 1)));
     }
   }
 }
 
 TEST(Grid2D, TwoDataDimensionColMajorInterleaved) {
+  // clang-format off
   int x[] = { 1,  4, 2,  8,
               2,  8, 3, 12,
               3, 12, 4, 16};
+  // clang-format on
   Grid2D<int, 2, false, true> grid(x, 0, 2, 0, 3);
   for (int r = 0; r < 2; ++r) {
     for (int c = 0; c < 3; ++c) {
       double value[2];
       grid.GetValue(r, c, value);
       EXPECT_EQ(value[0], static_cast<double>(r + c + 1));
-      EXPECT_EQ(value[1], static_cast<double>(4 *(r + c + 1)));
+      EXPECT_EQ(value[1], static_cast<double>(4 * (r + c + 1)));
     }
   }
 }
 
 TEST(Grid2D, TwoDataDimensionColMajorStacked) {
+  // clang-format off
   int x[] = {1,   2,
              2,   3,
              3,   4,
              4,   8,
              8,  12,
              12, 16};
+  // clang-format on
   Grid2D<int, 2, false, false> grid(x, 0, 2, 0, 3);
   for (int r = 0; r < 2; ++r) {
     for (int c = 0; c < 3; ++c) {
       double value[2];
       grid.GetValue(r, c, value);
       EXPECT_EQ(value[0], static_cast<double>(r + c + 1));
-      EXPECT_EQ(value[1], static_cast<double>(4 *(r + c + 1)));
+      EXPECT_EQ(value[1], static_cast<double>(4 * (r + c + 1)));
     }
   }
 }
@@ -214,8 +230,8 @@
 
     for (int x = 0; x < kNumSamples; ++x) {
       for (int dim = 0; dim < kDataDimension; ++dim) {
-      values_[x * kDataDimension + dim] =
-          (dim * dim  + 1) * (a  * x * x * x + b * x * x + c * x + d);
+        values_[x * kDataDimension + dim] =
+            (dim * dim + 1) * (a * x * x * x + b * x * x + c * x + d);
       }
     }
 
@@ -236,8 +252,9 @@
 
       for (int dim = 0; dim < kDataDimension; ++dim) {
         expected_f[dim] =
-            (dim * dim  + 1) * (a  * x * x * x + b * x * x + c * x + d);
-        expected_dfdx[dim] = (dim * dim + 1) * (3.0 * a * x * x + 2.0 * b * x + c);
+            (dim * dim + 1) * (a * x * x * x + b * x * x + c * x + d);
+        expected_dfdx[dim] =
+            (dim * dim + 1) * (3.0 * a * x * x + 2.0 * b * x + c);
       }
 
       interpolator.Evaluate(x, f, dfdx);
@@ -278,7 +295,6 @@
   RunPolynomialInterpolationTest<3>(0.0, 0.4, 1.0, 0.5);
 }
 
-
 TEST(CubicInterpolator, JetEvaluation) {
   const double values[] = {1.0, 2.0, 2.0, 5.0, 3.0, 9.0, 2.0, 7.0};
 
@@ -330,7 +346,8 @@
       }
     }
 
-    Grid2D<double, kDataDimension> grid(values_.get(), 0, kNumRows, 0, kNumCols);
+    Grid2D<double, kDataDimension> grid(
+        values_.get(), 0, kNumRows, 0, kNumCols);
     BiCubicInterpolator<Grid2D<double, kDataDimension>> interpolator(grid);
 
     for (int j = 0; j < kNumRowSamples; ++j) {
@@ -341,8 +358,10 @@
         interpolator.Evaluate(r, c, f, dfdr, dfdc);
         for (int dim = 0; dim < kDataDimension; ++dim) {
           EXPECT_NEAR(f[dim], (dim * dim + 1) * EvaluateF(r, c), kTolerance);
-          EXPECT_NEAR(dfdr[dim], (dim * dim + 1) * EvaluatedFdr(r, c), kTolerance);
-          EXPECT_NEAR(dfdc[dim], (dim * dim + 1) * EvaluatedFdc(r, c), kTolerance);
+          EXPECT_NEAR(
+              dfdr[dim], (dim * dim + 1) * EvaluatedFdr(r, c), kTolerance);
+          EXPECT_NEAR(
+              dfdc[dim], (dim * dim + 1) * EvaluatedFdc(r, c), kTolerance);
         }
       }
     }
@@ -373,7 +392,6 @@
     return (coeff_.row(1) + coeff_.col(1).transpose()) * x;
   }
 
-
   Eigen::Matrix3d coeff_;
   static constexpr int kNumRows = 10;
   static constexpr int kNumCols = 10;
@@ -471,8 +489,10 @@
 }
 
 TEST(BiCubicInterpolator, JetEvaluation) {
+  // clang-format off
   const double values[] = {1.0, 5.0, 2.0, 10.0, 2.0, 6.0, 3.0, 5.0,
                            1.0, 2.0, 2.0,  2.0, 2.0, 2.0, 3.0, 1.0};
+  // clang-format on
 
   Grid2D<double, 2> grid(values, 0, 2, 0, 4);
   BiCubicInterpolator<Grid2D<double, 2>> interpolator(grid);
diff --git a/internal/ceres/cxsparse.cc b/internal/ceres/cxsparse.cc
index 5a02877..0167f98 100644
--- a/internal/ceres/cxsparse.cc
+++ b/internal/ceres/cxsparse.cc
@@ -33,13 +33,12 @@
 
 #ifndef CERES_NO_CXSPARSE
 
-#include "ceres/cxsparse.h"
-
 #include <string>
 #include <vector>
 
 #include "ceres/compressed_col_sparse_matrix_utils.h"
 #include "ceres/compressed_row_sparse_matrix.h"
+#include "ceres/cxsparse.h"
 #include "ceres/triplet_sparse_matrix.h"
 #include "glog/logging.h"
 
diff --git a/internal/ceres/cxsparse.h b/internal/ceres/cxsparse.h
index dc4740c..d3f76e0 100644
--- a/internal/ceres/cxsparse.h
+++ b/internal/ceres/cxsparse.h
@@ -166,7 +166,7 @@
 }  // namespace internal
 }  // namespace ceres
 
-#else   // CERES_NO_CXSPARSE
+#else
 
 typedef void cs_dis;
 
diff --git a/internal/ceres/dense_jacobian_writer.h b/internal/ceres/dense_jacobian_writer.h
index 1b04f38..28c60e2 100644
--- a/internal/ceres/dense_jacobian_writer.h
+++ b/internal/ceres/dense_jacobian_writer.h
@@ -35,21 +35,19 @@
 
 #include "ceres/casts.h"
 #include "ceres/dense_sparse_matrix.h"
+#include "ceres/internal/eigen.h"
 #include "ceres/parameter_block.h"
 #include "ceres/program.h"
 #include "ceres/residual_block.h"
 #include "ceres/scratch_evaluate_preparer.h"
-#include "ceres/internal/eigen.h"
 
 namespace ceres {
 namespace internal {
 
 class DenseJacobianWriter {
  public:
-  DenseJacobianWriter(Evaluator::Options /* ignored */,
-                      Program* program)
-    : program_(program) {
-  }
+  DenseJacobianWriter(Evaluator::Options /* ignored */, Program* program)
+      : program_(program) {}
 
   // JacobianWriter interface.
 
@@ -61,14 +59,13 @@
   }
 
   SparseMatrix* CreateJacobian() const {
-    return new DenseSparseMatrix(program_->NumResiduals(),
-                                 program_->NumEffectiveParameters(),
-                                 true);
+    return new DenseSparseMatrix(
+        program_->NumResiduals(), program_->NumEffectiveParameters(), true);
   }
 
   void Write(int residual_id,
              int residual_offset,
-             double **jacobians,
+             double** jacobians,
              SparseMatrix* jacobian) {
     DenseSparseMatrix* dense_jacobian = down_cast<DenseSparseMatrix*>(jacobian);
     const ResidualBlock* residual_block =
@@ -86,15 +83,14 @@
       }
 
       const int parameter_block_size = parameter_block->LocalSize();
-      ConstMatrixRef parameter_jacobian(jacobians[j],
-                                        num_residuals,
-                                        parameter_block_size);
+      ConstMatrixRef parameter_jacobian(
+          jacobians[j], num_residuals, parameter_block_size);
 
-      dense_jacobian->mutable_matrix().block(
-          residual_offset,
-          parameter_block->delta_offset(),
-          num_residuals,
-          parameter_block_size) = parameter_jacobian;
+      dense_jacobian->mutable_matrix().block(residual_offset,
+                                             parameter_block->delta_offset(),
+                                             num_residuals,
+                                             parameter_block_size) =
+          parameter_jacobian;
     }
   }
 
diff --git a/internal/ceres/dense_linear_solver_test.cc b/internal/ceres/dense_linear_solver_test.cc
index 4de745f..3929a6f 100644
--- a/internal/ceres/dense_linear_solver_test.cc
+++ b/internal/ceres/dense_linear_solver_test.cc
@@ -29,6 +29,7 @@
 // Author: sameeragarwal@google.com (Sameer Agarwal)
 
 #include <memory>
+
 #include "ceres/casts.h"
 #include "ceres/context_impl.h"
 #include "ceres/linear_least_squares_problems.h"
diff --git a/internal/ceres/dense_normal_cholesky_solver.cc b/internal/ceres/dense_normal_cholesky_solver.cc
index fe7d931..51c6390 100644
--- a/internal/ceres/dense_normal_cholesky_solver.cc
+++ b/internal/ceres/dense_normal_cholesky_solver.cc
@@ -132,13 +132,8 @@
   //
   // Note: This is a bit delicate, it assumes that the stride on this
   // matrix is the same as the number of rows.
-  BLAS::SymmetricRankKUpdate(A->num_rows(),
-                             num_cols,
-                             A->values(),
-                             true,
-                             1.0,
-                             0.0,
-                             lhs.data());
+  BLAS::SymmetricRankKUpdate(
+      A->num_rows(), num_cols, A->values(), true, 1.0, 0.0, lhs.data());
 
   if (per_solve_options.D != NULL) {
     // Undo the modifications to the matrix A.
@@ -153,13 +148,10 @@
 
   LinearSolver::Summary summary;
   summary.num_iterations = 1;
-  summary.termination_type =
-      LAPACK::SolveInPlaceUsingCholesky(num_cols,
-                                        lhs.data(),
-                                        x,
-                                        &summary.message);
+  summary.termination_type = LAPACK::SolveInPlaceUsingCholesky(
+      num_cols, lhs.data(), x, &summary.message);
   event_logger.AddEvent("Solve");
   return summary;
 }
-}   // namespace internal
-}   // namespace ceres
+}  // namespace internal
+}  // namespace ceres
diff --git a/internal/ceres/dense_normal_cholesky_solver.h b/internal/ceres/dense_normal_cholesky_solver.h
index 976718e..68ea611 100644
--- a/internal/ceres/dense_normal_cholesky_solver.h
+++ b/internal/ceres/dense_normal_cholesky_solver.h
@@ -73,7 +73,7 @@
 // library. This solver always returns a solution, it is the user's
 // responsibility to judge if the solution is good enough for their
 // purposes.
-class DenseNormalCholeskySolver: public DenseSparseMatrixSolver {
+class DenseNormalCholeskySolver : public DenseSparseMatrixSolver {
  public:
   explicit DenseNormalCholeskySolver(const LinearSolver::Options& options);
 
diff --git a/internal/ceres/dense_qr_solver.cc b/internal/ceres/dense_qr_solver.cc
index 161e9c6..44388f3 100644
--- a/internal/ceres/dense_qr_solver.cc
+++ b/internal/ceres/dense_qr_solver.cc
@@ -31,6 +31,7 @@
 #include "ceres/dense_qr_solver.h"
 
 #include <cstddef>
+
 #include "Eigen/Dense"
 #include "ceres/dense_sparse_matrix.h"
 #include "ceres/internal/eigen.h"
@@ -77,7 +78,7 @@
 
   // TODO(sameeragarwal): Since we are copying anyways, the diagonal
   // can be appended to the matrix instead of doing it on A.
-  lhs_ =  A->matrix();
+  lhs_ = A->matrix();
 
   if (per_solve_options.D != NULL) {
     // Undo the modifications to the matrix A.
@@ -164,5 +165,5 @@
   return summary;
 }
 
-}   // namespace internal
-}   // namespace ceres
+}  // namespace internal
+}  // namespace ceres
diff --git a/internal/ceres/dense_qr_solver.h b/internal/ceres/dense_qr_solver.h
index 9ea959d..a30cd1c 100644
--- a/internal/ceres/dense_qr_solver.h
+++ b/internal/ceres/dense_qr_solver.h
@@ -32,8 +32,8 @@
 #ifndef CERES_INTERNAL_DENSE_QR_SOLVER_H_
 #define CERES_INTERNAL_DENSE_QR_SOLVER_H_
 
-#include "ceres/linear_solver.h"
 #include "ceres/internal/eigen.h"
+#include "ceres/linear_solver.h"
 
 namespace ceres {
 namespace internal {
@@ -78,7 +78,7 @@
 // library. This solver always returns a solution, it is the user's
 // responsibility to judge if the solution is good enough for their
 // purposes.
-class DenseQRSolver: public DenseSparseMatrixSolver {
+class DenseQRSolver : public DenseSparseMatrixSolver {
  public:
   explicit DenseQRSolver(const LinearSolver::Options& options);
 
diff --git a/internal/ceres/dense_sparse_matrix.cc b/internal/ceres/dense_sparse_matrix.cc
index 72e0836..53207fe 100644
--- a/internal/ceres/dense_sparse_matrix.cc
+++ b/internal/ceres/dense_sparse_matrix.cc
@@ -31,17 +31,17 @@
 #include "ceres/dense_sparse_matrix.h"
 
 #include <algorithm>
-#include "ceres/triplet_sparse_matrix.h"
+
 #include "ceres/internal/eigen.h"
 #include "ceres/internal/port.h"
+#include "ceres/triplet_sparse_matrix.h"
 #include "glog/logging.h"
 
 namespace ceres {
 namespace internal {
 
 DenseSparseMatrix::DenseSparseMatrix(int num_rows, int num_cols)
-    : has_diagonal_appended_(false),
-      has_diagonal_reserved_(false) {
+    : has_diagonal_appended_(false), has_diagonal_reserved_(false) {
   m_.resize(num_rows, num_cols);
   m_.setZero();
 }
@@ -49,11 +49,10 @@
 DenseSparseMatrix::DenseSparseMatrix(int num_rows,
                                      int num_cols,
                                      bool reserve_diagonal)
-    : has_diagonal_appended_(false),
-      has_diagonal_reserved_(reserve_diagonal) {
+    : has_diagonal_appended_(false), has_diagonal_reserved_(reserve_diagonal) {
   if (reserve_diagonal) {
     // Allocate enough space for the diagonal.
-    m_.resize(num_rows +  num_cols, num_cols);
+    m_.resize(num_rows + num_cols, num_cols);
   } else {
     m_.resize(num_rows, num_cols);
   }
@@ -64,9 +63,9 @@
     : m_(Eigen::MatrixXd::Zero(m.num_rows(), m.num_cols())),
       has_diagonal_appended_(false),
       has_diagonal_reserved_(false) {
-  const double *values = m.values();
-  const int *rows = m.rows();
-  const int *cols = m.cols();
+  const double* values = m.values();
+  const int* rows = m.rows();
+  const int* cols = m.cols();
   int num_nonzeros = m.num_nonzeros();
 
   for (int i = 0; i < num_nonzeros; ++i) {
@@ -75,14 +74,9 @@
 }
 
 DenseSparseMatrix::DenseSparseMatrix(const ColMajorMatrix& m)
-    : m_(m),
-      has_diagonal_appended_(false),
-      has_diagonal_reserved_(false) {
-}
+    : m_(m), has_diagonal_appended_(false), has_diagonal_reserved_(false) {}
 
-void DenseSparseMatrix::SetZero() {
-  m_.setZero();
-}
+void DenseSparseMatrix::SetZero() { m_.setZero(); }
 
 void DenseSparseMatrix::RightMultiply(const double* x, double* y) const {
   VectorRef(y, num_rows()) += matrix() * ConstVectorRef(x, num_cols());
@@ -105,7 +99,7 @@
   *dense_matrix = m_.block(0, 0, num_rows(), num_cols());
 }
 
-void DenseSparseMatrix::AppendDiagonal(double *d) {
+void DenseSparseMatrix::AppendDiagonal(double* d) {
   CHECK(!has_diagonal_appended_);
   if (!has_diagonal_reserved_) {
     ColMajorMatrix tmp = m_;
@@ -133,9 +127,7 @@
   return m_.rows();
 }
 
-int DenseSparseMatrix::num_cols() const {
-  return m_.cols();
-}
+int DenseSparseMatrix::num_cols() const { return m_.cols(); }
 
 int DenseSparseMatrix::num_nonzeros() const {
   if (has_diagonal_reserved_ && !has_diagonal_appended_) {
@@ -148,33 +140,30 @@
   return ConstColMajorMatrixRef(
       m_.data(),
       ((has_diagonal_reserved_ && !has_diagonal_appended_)
-       ? m_.rows() - m_.cols()
-       : m_.rows()),
+           ? m_.rows() - m_.cols()
+           : m_.rows()),
       m_.cols(),
       Eigen::Stride<Eigen::Dynamic, 1>(m_.rows(), 1));
 }
 
 ColMajorMatrixRef DenseSparseMatrix::mutable_matrix() {
-  return ColMajorMatrixRef(
-      m_.data(),
-      ((has_diagonal_reserved_ && !has_diagonal_appended_)
-       ? m_.rows() - m_.cols()
-       : m_.rows()),
-      m_.cols(),
-      Eigen::Stride<Eigen::Dynamic, 1>(m_.rows(), 1));
+  return ColMajorMatrixRef(m_.data(),
+                           ((has_diagonal_reserved_ && !has_diagonal_appended_)
+                                ? m_.rows() - m_.cols()
+                                : m_.rows()),
+                           m_.cols(),
+                           Eigen::Stride<Eigen::Dynamic, 1>(m_.rows(), 1));
 }
 
-
 void DenseSparseMatrix::ToTextFile(FILE* file) const {
   CHECK(file != nullptr);
-  const int active_rows =
-      (has_diagonal_reserved_ && !has_diagonal_appended_)
-      ? (m_.rows() - m_.cols())
-      : m_.rows();
+  const int active_rows = (has_diagonal_reserved_ && !has_diagonal_appended_)
+                              ? (m_.rows() - m_.cols())
+                              : m_.rows();
 
   for (int r = 0; r < active_rows; ++r) {
     for (int c = 0; c < m_.cols(); ++c) {
-      fprintf(file,  "% 10d % 10d %17f\n", r, c, m_(r, c));
+      fprintf(file, "% 10d % 10d %17f\n", r, c, m_(r, c));
     }
   }
 }
diff --git a/internal/ceres/dense_sparse_matrix.h b/internal/ceres/dense_sparse_matrix.h
index 6d3d504..34a3be2 100644
--- a/internal/ceres/dense_sparse_matrix.h
+++ b/internal/ceres/dense_sparse_matrix.h
@@ -92,7 +92,7 @@
   // Calling RemoveDiagonal removes the block. It is a fatal error to append a
   // diagonal to a matrix that already has an appended diagonal, and it is also
   // a fatal error to remove a diagonal from a matrix that has none.
-  void AppendDiagonal(double *d);
+  void AppendDiagonal(double* d);
   void RemoveDiagonal();
 
  private:
diff --git a/internal/ceres/dense_sparse_matrix_test.cc b/internal/ceres/dense_sparse_matrix_test.cc
index a0c1cd6..2fa7216 100644
--- a/internal/ceres/dense_sparse_matrix_test.cc
+++ b/internal/ceres/dense_sparse_matrix_test.cc
@@ -35,10 +35,11 @@
 #include "ceres/dense_sparse_matrix.h"
 
 #include <memory>
+
 #include "ceres/casts.h"
+#include "ceres/internal/eigen.h"
 #include "ceres/linear_least_squares_problems.h"
 #include "ceres/triplet_sparse_matrix.h"
-#include "ceres/internal/eigen.h"
 #include "glog/logging.h"
 #include "gtest/gtest.h"
 
@@ -67,7 +68,7 @@
 }
 
 class DenseSparseMatrixTest : public ::testing::Test {
- protected :
+ protected:
   void SetUp() final {
     std::unique_ptr<LinearLeastSquaresProblem> problem(
         CreateLinearLeastSquaresProblemFromId(1));
diff --git a/internal/ceres/detect_structure.cc b/internal/ceres/detect_structure.cc
index 959a0ee..4aac445 100644
--- a/internal/ceres/detect_structure.cc
+++ b/internal/ceres/detect_structure.cc
@@ -29,6 +29,7 @@
 // Author: sameeragarwal@google.com (Sameer Agarwal)
 
 #include "ceres/detect_structure.h"
+
 #include "ceres/internal/eigen.h"
 #include "glog/logging.h"
 
@@ -61,8 +62,7 @@
     } else if (*row_block_size != Eigen::Dynamic &&
                *row_block_size != row.block.size) {
       VLOG(2) << "Dynamic row block size because the block size changed from "
-              << *row_block_size << " to "
-              << row.block.size;
+              << *row_block_size << " to " << row.block.size;
       *row_block_size = Eigen::Dynamic;
     }
 
@@ -73,8 +73,7 @@
     } else if (*e_block_size != Eigen::Dynamic &&
                *e_block_size != bs.cols[e_block_id].size) {
       VLOG(2) << "Dynamic e block size because the block size changed from "
-              << *e_block_size << " to "
-              << bs.cols[e_block_id].size;
+              << *e_block_size << " to " << bs.cols[e_block_id].size;
       *e_block_size = Eigen::Dynamic;
     }
 
@@ -100,9 +99,11 @@
       }
     }
 
+    // clang-format off
     const bool is_everything_dynamic = (*row_block_size == Eigen::Dynamic &&
                                         *e_block_size == Eigen::Dynamic &&
                                         *f_block_size == Eigen::Dynamic);
+    // clang-format on
     if (is_everything_dynamic) {
       break;
     }
@@ -110,10 +111,12 @@
 
   CHECK_NE(*row_block_size, 0) << "No rows found";
   CHECK_NE(*e_block_size, 0) << "No e type blocks found";
+  // clang-format off
   VLOG(1) << "Schur complement static structure <"
           << *row_block_size << ","
           << *e_block_size << ","
           << *f_block_size << ">.";
+  // clang-format on
 }
 
 }  // namespace internal
diff --git a/internal/ceres/detect_structure_test.cc b/internal/ceres/detect_structure_test.cc
index a701a19..8f9c5ed 100644
--- a/internal/ceres/detect_structure_test.cc
+++ b/internal/ceres/detect_structure_test.cc
@@ -28,11 +28,12 @@
 //
 // Author: sameeragarwal@google.com (Sameer Agarwal)
 
+#include "ceres/detect_structure.h"
+
 #include "Eigen/Core"
+#include "ceres/block_structure.h"
 #include "glog/logging.h"
 #include "gtest/gtest.h"
-#include "ceres/block_structure.h"
-#include "ceres/detect_structure.h"
 
 namespace ceres {
 namespace internal {
@@ -78,11 +79,8 @@
   int e_block_size = 0;
   int f_block_size = 0;
   const int num_eliminate_blocks = 1;
-  DetectStructure(bs,
-                  num_eliminate_blocks,
-                  &row_block_size,
-                  &e_block_size,
-                  &f_block_size);
+  DetectStructure(
+      bs, num_eliminate_blocks, &row_block_size, &e_block_size, &f_block_size);
 
   EXPECT_EQ(row_block_size, expected_row_block_size);
   EXPECT_EQ(e_block_size, expected_e_block_size);
@@ -130,11 +128,8 @@
   int e_block_size = 0;
   int f_block_size = 0;
   const int num_eliminate_blocks = 1;
-  DetectStructure(bs,
-                  num_eliminate_blocks,
-                  &row_block_size,
-                  &e_block_size,
-                  &f_block_size);
+  DetectStructure(
+      bs, num_eliminate_blocks, &row_block_size, &e_block_size, &f_block_size);
 
   EXPECT_EQ(row_block_size, expected_row_block_size);
   EXPECT_EQ(e_block_size, expected_e_block_size);
@@ -146,7 +141,6 @@
   const int expected_e_block_size = 3;
   const int expected_f_block_size = Eigen::Dynamic;
 
-
   CompressedRowBlockStructure bs;
 
   bs.cols.push_back(Block());
@@ -183,11 +177,8 @@
   int e_block_size = 0;
   int f_block_size = 0;
   const int num_eliminate_blocks = 1;
-  DetectStructure(bs,
-                  num_eliminate_blocks,
-                  &row_block_size,
-                  &e_block_size,
-                  &f_block_size);
+  DetectStructure(
+      bs, num_eliminate_blocks, &row_block_size, &e_block_size, &f_block_size);
 
   EXPECT_EQ(row_block_size, expected_row_block_size);
   EXPECT_EQ(e_block_size, expected_e_block_size);
@@ -235,11 +226,8 @@
   int e_block_size = 0;
   int f_block_size = 0;
   const int num_eliminate_blocks = 2;
-  DetectStructure(bs,
-                  num_eliminate_blocks,
-                  &row_block_size,
-                  &e_block_size,
-                  &f_block_size);
+  DetectStructure(
+      bs, num_eliminate_blocks, &row_block_size, &e_block_size, &f_block_size);
 
   EXPECT_EQ(row_block_size, expected_row_block_size);
   EXPECT_EQ(e_block_size, expected_e_block_size);
@@ -279,11 +267,8 @@
   int e_block_size = 0;
   int f_block_size = 0;
   const int num_eliminate_blocks = 1;
-  DetectStructure(bs,
-                  num_eliminate_blocks,
-                  &row_block_size,
-                  &e_block_size,
-                  &f_block_size);
+  DetectStructure(
+      bs, num_eliminate_blocks, &row_block_size, &e_block_size, &f_block_size);
 
   EXPECT_EQ(row_block_size, expected_row_block_size);
   EXPECT_EQ(e_block_size, expected_e_block_size);
diff --git a/internal/ceres/dogleg_strategy.cc b/internal/ceres/dogleg_strategy.cc
index ecc6b88..03ae22f 100644
--- a/internal/ceres/dogleg_strategy.cc
+++ b/internal/ceres/dogleg_strategy.cc
@@ -49,7 +49,7 @@
 namespace {
 const double kMaxMu = 1.0;
 const double kMinMu = 1e-8;
-}
+}  // namespace
 
 DoglegStrategy::DoglegStrategy(const TrustRegionStrategy::Options& options)
     : linear_solver_(options.linear_solver),
@@ -122,8 +122,8 @@
   //
   jacobian->SquaredColumnNorm(diagonal_.data());
   for (int i = 0; i < n; ++i) {
-    diagonal_[i] = std::min(std::max(diagonal_[i], min_diagonal_),
-                            max_diagonal_);
+    diagonal_[i] =
+        std::min(std::max(diagonal_[i], min_diagonal_), max_diagonal_);
   }
   diagonal_ = diagonal_.array().sqrt();
 
@@ -171,9 +171,8 @@
 // The gradient, the Gauss-Newton step, the Cauchy point,
 // and all calculations involving the Jacobian have to
 // be adjusted accordingly.
-void DoglegStrategy::ComputeGradient(
-    SparseMatrix* jacobian,
-    const double* residuals) {
+void DoglegStrategy::ComputeGradient(SparseMatrix* jacobian,
+                                     const double* residuals) {
   gradient_.setZero();
   jacobian->LeftMultiply(residuals, gradient_.data());
   gradient_.array() /= diagonal_.array();
@@ -187,8 +186,7 @@
   Jg.setZero();
   // The Jacobian is scaled implicitly by computing J * (D^-1 * (D^-1 * g))
   // instead of (J * D^-1) * (D^-1 * g).
-  Vector scaled_gradient =
-      (gradient_.array() / diagonal_.array()).matrix();
+  Vector scaled_gradient = (gradient_.array() / diagonal_.array()).matrix();
   jacobian->RightMultiply(scaled_gradient.data(), Jg.data());
   alpha_ = gradient_.squaredNorm() / Jg.squaredNorm();
 }
@@ -217,7 +215,7 @@
   // Case 2. The Cauchy point and the Gauss-Newton steps lie outside
   // the trust region. Rescale the Cauchy point to the trust region
   // and return.
-  if  (gradient_norm * alpha_ >= radius_) {
+  if (gradient_norm * alpha_ >= radius_) {
     dogleg_step = -(radius_ / gradient_norm) * gradient_;
     dogleg_step_norm_ = radius_;
     dogleg_step.array() /= diagonal_.array();
@@ -242,14 +240,12 @@
   //   = alpha * -gradient' gauss_newton_step - alpha^2 |gradient|^2
   const double c = b_dot_a - a_squared_norm;
   const double d = sqrt(c * c + b_minus_a_squared_norm *
-                        (pow(radius_, 2.0) - a_squared_norm));
+                                    (pow(radius_, 2.0) - a_squared_norm));
 
-  double beta =
-      (c <= 0)
-      ? (d - c) /  b_minus_a_squared_norm
-      : (radius_ * radius_ - a_squared_norm) / (d + c);
-  dogleg_step = (-alpha_ * (1.0 - beta)) * gradient_
-      + beta * gauss_newton_step_;
+  double beta = (c <= 0) ? (d - c) / b_minus_a_squared_norm
+                         : (radius_ * radius_ - a_squared_norm) / (d + c);
+  dogleg_step =
+      (-alpha_ * (1.0 - beta)) * gradient_ + beta * gauss_newton_step_;
   dogleg_step_norm_ = dogleg_step.norm();
   dogleg_step.array() /= diagonal_.array();
   VLOG(3) << "Dogleg step size: " << dogleg_step_norm_
@@ -345,13 +341,13 @@
   // correctly determined.
   const double kCosineThreshold = 0.99;
   const Vector2d grad_minimum = subspace_B_ * minimum + subspace_g_;
-  const double cosine_angle = -minimum.dot(grad_minimum) /
-      (minimum.norm() * grad_minimum.norm());
+  const double cosine_angle =
+      -minimum.dot(grad_minimum) / (minimum.norm() * grad_minimum.norm());
   if (cosine_angle < kCosineThreshold) {
     LOG(WARNING) << "First order optimality seems to be violated "
                  << "in the subspace method!\n"
-                 << "Cosine of angle between x and B x + g is "
-                 << cosine_angle << ".\n"
+                 << "Cosine of angle between x and B x + g is " << cosine_angle
+                 << ".\n"
                  << "Taking a regular dogleg step instead.\n"
                  << "Please consider filing a bug report if this "
                  << "happens frequently or consistently.\n";
@@ -423,15 +419,17 @@
   const double trB = subspace_B_.trace();
   const double r2 = radius_ * radius_;
   Matrix2d B_adj;
+  // clang-format off
   B_adj <<  subspace_B_(1, 1) , -subspace_B_(0, 1),
-            -subspace_B_(1, 0) ,  subspace_B_(0, 0);
+           -subspace_B_(1, 0) ,  subspace_B_(0, 0);
+  // clang-format on
 
   Vector polynomial(5);
   polynomial(0) = r2;
   polynomial(1) = 2.0 * r2 * trB;
   polynomial(2) = r2 * (trB * trB + 2.0 * detB) - subspace_g_.squaredNorm();
-  polynomial(3) = -2.0 * (subspace_g_.transpose() * B_adj * subspace_g_
-      - r2 * detB * trB);
+  polynomial(3) =
+      -2.0 * (subspace_g_.transpose() * B_adj * subspace_g_ - r2 * detB * trB);
   polynomial(4) = r2 * detB * detB - (B_adj * subspace_g_).squaredNorm();
 
   return polynomial;
@@ -565,10 +563,8 @@
     // of Jx = -r and later set x = -y to avoid having to modify
     // either jacobian or residuals.
     InvalidateArray(n, gauss_newton_step_.data());
-    linear_solver_summary = linear_solver_->Solve(jacobian,
-                                                  residuals,
-                                                  solve_options,
-                                                  gauss_newton_step_.data());
+    linear_solver_summary = linear_solver_->Solve(
+        jacobian, residuals, solve_options, gauss_newton_step_.data());
 
     if (per_solve_options.dump_format_type == CONSOLE ||
         (per_solve_options.dump_format_type != CONSOLE &&
@@ -641,9 +637,7 @@
   reuse_ = false;
 }
 
-double DoglegStrategy::Radius() const {
-  return radius_;
-}
+double DoglegStrategy::Radius() const { return radius_; }
 
 bool DoglegStrategy::ComputeSubspaceModel(SparseMatrix* jacobian) {
   // Compute an orthogonal basis for the subspace using QR decomposition.
@@ -701,8 +695,8 @@
 
   subspace_g_ = subspace_basis_.transpose() * gradient_;
 
-  Eigen::Matrix<double, 2, Eigen::Dynamic, Eigen::RowMajor>
-      Jb(2, jacobian->num_rows());
+  Eigen::Matrix<double, 2, Eigen::Dynamic, Eigen::RowMajor> Jb(
+      2, jacobian->num_rows());
   Jb.setZero();
 
   Vector tmp;
diff --git a/internal/ceres/dogleg_strategy.h b/internal/ceres/dogleg_strategy.h
index 1150940..9616ffe 100644
--- a/internal/ceres/dogleg_strategy.h
+++ b/internal/ceres/dogleg_strategy.h
@@ -59,9 +59,9 @@
 
   // TrustRegionStrategy interface
   Summary ComputeStep(const PerSolveOptions& per_solve_options,
-                              SparseMatrix* jacobian,
-                              const double* residuals,
-                              double* step) final;
+                      SparseMatrix* jacobian,
+                      const double* residuals,
+                      double* step) final;
   void StepAccepted(double step_quality) final;
   void StepRejected(double step_quality) final;
   void StepIsInvalid();
diff --git a/internal/ceres/dogleg_strategy_test.cc b/internal/ceres/dogleg_strategy_test.cc
index af5b3e8..0c20f25 100644
--- a/internal/ceres/dogleg_strategy_test.cc
+++ b/internal/ceres/dogleg_strategy_test.cc
@@ -28,11 +28,13 @@
 //
 // Author: moll.markus@arcor.de (Markus Moll)
 
+#include "ceres/dogleg_strategy.h"
+
 #include <limits>
 #include <memory>
-#include "ceres/internal/eigen.h"
+
 #include "ceres/dense_qr_solver.h"
-#include "ceres/dogleg_strategy.h"
+#include "ceres/internal/eigen.h"
 #include "ceres/linear_solver.h"
 #include "ceres/trust_region_strategy.h"
 #include "glog/logging.h"
@@ -63,12 +65,14 @@
   void SetUp() final {
     Matrix basis(6, 6);
     // The following lines exceed 80 characters for better readability.
+    // clang-format off
     basis << -0.1046920933796121, -0.7449367449921986, -0.4190744502875876, -0.4480450716142566,  0.2375351607929440, -0.0363053418882862,  // NOLINT
               0.4064975684355914,  0.2681113508511354, -0.7463625494601520, -0.0803264850508117, -0.4463149623021321,  0.0130224954867195,  // NOLINT
              -0.5514387729089798,  0.1026621026168657, -0.5008316122125011,  0.5738122212666414,  0.2974664724007106,  0.1296020877535158,  // NOLINT
               0.5037835370947156,  0.2668479925183712, -0.1051754618492798, -0.0272739396578799,  0.7947481647088278, -0.1776623363955670,  // NOLINT
              -0.4005458426625444,  0.2939330589634109, -0.0682629380550051, -0.2895448882503687, -0.0457239396341685, -0.8139899477847840,  // NOLINT
              -0.3247764582762654,  0.4528151365941945, -0.0276683863102816, -0.6155994592510784,  0.1489240599972848,  0.5362574892189350;  // NOLINT
+    // clang-format on
 
     Vector Ddiag(6);
     Ddiag << 1.0, 2.0, 4.0, 8.0, 16.0, 32.0;
@@ -139,10 +143,8 @@
   DoglegStrategy strategy(options_);
   TrustRegionStrategy::PerSolveOptions pso;
 
-  TrustRegionStrategy::Summary summary = strategy.ComputeStep(pso,
-                                                              jacobian_.get(),
-                                                              residual_.data(),
-                                                              x_.data());
+  TrustRegionStrategy::Summary summary =
+      strategy.ComputeStep(pso, jacobian_.get(), residual_.data(), x_.data());
 
   EXPECT_NE(summary.termination_type, LINEAR_SOLVER_FAILURE);
   EXPECT_LE(x_.norm(), options_.initial_radius * (1.0 + 4.0 * kEpsilon));
@@ -159,10 +161,8 @@
   DoglegStrategy strategy(options_);
   TrustRegionStrategy::PerSolveOptions pso;
 
-  TrustRegionStrategy::Summary summary = strategy.ComputeStep(pso,
-                                                              jacobian_.get(),
-                                                              residual_.data(),
-                                                              x_.data());
+  TrustRegionStrategy::Summary summary =
+      strategy.ComputeStep(pso, jacobian_.get(), residual_.data(), x_.data());
 
   EXPECT_NE(summary.termination_type, LINEAR_SOLVER_FAILURE);
   EXPECT_LE(x_.norm(), options_.initial_radius * (1.0 + 4.0 * kEpsilon));
@@ -179,10 +179,8 @@
   DoglegStrategy strategy(options_);
   TrustRegionStrategy::PerSolveOptions pso;
 
-  TrustRegionStrategy::Summary summary = strategy.ComputeStep(pso,
-                                                              jacobian_.get(),
-                                                              residual_.data(),
-                                                              x_.data());
+  TrustRegionStrategy::Summary summary =
+      strategy.ComputeStep(pso, jacobian_.get(), residual_.data(), x_.data());
 
   EXPECT_NE(summary.termination_type, LINEAR_SOLVER_FAILURE);
   EXPECT_NEAR(x_(0), 1.0, kToleranceLoose);
@@ -216,15 +214,13 @@
 
   // Check if the gradient projects onto itself.
   const Vector gradient = strategy.gradient();
-  EXPECT_NEAR((gradient - basis*(basis.transpose()*gradient)).norm(),
+  EXPECT_NEAR((gradient - basis * (basis.transpose() * gradient)).norm(),
               0.0,
               kTolerance);
 
   // Check if the Gauss-Newton point projects onto itself.
   const Vector gn = strategy.gauss_newton_step();
-  EXPECT_NEAR((gn - basis*(basis.transpose()*gn)).norm(),
-              0.0,
-              kTolerance);
+  EXPECT_NEAR((gn - basis * (basis.transpose() * gn)).norm(), 0.0, kTolerance);
 }
 
 // Test if the step is correct if the gradient and the Gauss-Newton step point
@@ -241,10 +237,8 @@
   DoglegStrategy strategy(options_);
   TrustRegionStrategy::PerSolveOptions pso;
 
-  TrustRegionStrategy::Summary summary = strategy.ComputeStep(pso,
-                                                              jacobian_.get(),
-                                                              residual_.data(),
-                                                              x_.data());
+  TrustRegionStrategy::Summary summary =
+      strategy.ComputeStep(pso, jacobian_.get(), residual_.data(), x_.data());
 
   EXPECT_NE(summary.termination_type, LINEAR_SOLVER_FAILURE);
   EXPECT_NEAR(x_(0), 0.0, kToleranceLoose);
@@ -269,10 +263,8 @@
   DoglegStrategy strategy(options_);
   TrustRegionStrategy::PerSolveOptions pso;
 
-  TrustRegionStrategy::Summary summary = strategy.ComputeStep(pso,
-                                                              jacobian_.get(),
-                                                              residual_.data(),
-                                                              x_.data());
+  TrustRegionStrategy::Summary summary =
+      strategy.ComputeStep(pso, jacobian_.get(), residual_.data(), x_.data());
 
   EXPECT_NE(summary.termination_type, LINEAR_SOLVER_FAILURE);
   EXPECT_NEAR(x_(0), 0.0, kToleranceLoose);
diff --git a/internal/ceres/dynamic_autodiff_cost_function_test.cc b/internal/ceres/dynamic_autodiff_cost_function_test.cc
index 6b57fd3..55d3fe1 100644
--- a/internal/ceres/dynamic_autodiff_cost_function_test.cc
+++ b/internal/ceres/dynamic_autodiff_cost_function_test.cc
@@ -30,10 +30,11 @@
 //         mierle@gmail.com (Keir Mierle)
 //         sameeragarwal@google.com (Sameer Agarwal)
 
-#include <cstddef>
-
-#include <memory>
 #include "ceres/dynamic_autodiff_cost_function.h"
+
+#include <cstddef>
+#include <memory>
+
 #include "gtest/gtest.h"
 
 namespace ceres {
@@ -87,9 +88,8 @@
   vector<double*> parameter_blocks(2);
   parameter_blocks[0] = &param_block_0[0];
   parameter_blocks[1] = &param_block_1[0];
-  EXPECT_TRUE(cost_function.Evaluate(&parameter_blocks[0],
-                                     residuals.data(),
-                                     NULL));
+  EXPECT_TRUE(
+      cost_function.Evaluate(&parameter_blocks[0], residuals.data(), NULL));
   for (int r = 0; r < 10; ++r) {
     EXPECT_EQ(1.0 * r, residuals.at(r * 2));
     EXPECT_EQ(-1.0 * r, residuals.at(r * 2 + 1));
@@ -127,9 +127,8 @@
   jacobian.push_back(jacobian_vect[1].data());
 
   // Test jacobian computation.
-  EXPECT_TRUE(cost_function.Evaluate(parameter_blocks.data(),
-                                     residuals.data(),
-                                     jacobian.data()));
+  EXPECT_TRUE(cost_function.Evaluate(
+      parameter_blocks.data(), residuals.data(), jacobian.data()));
 
   for (int r = 0; r < 10; ++r) {
     EXPECT_EQ(-1.0 * r, residuals.at(r * 2));
@@ -138,11 +137,11 @@
   EXPECT_EQ(420, residuals.at(20));
   for (int p = 0; p < 10; ++p) {
     // Check "A" Jacobian.
-    EXPECT_EQ(-1.0, jacobian_vect[0][2*p * 10 + p]);
+    EXPECT_EQ(-1.0, jacobian_vect[0][2 * p * 10 + p]);
     // Check "B" Jacobian.
-    EXPECT_EQ(+1.0, jacobian_vect[0][(2*p+1) * 10 + p]);
-    jacobian_vect[0][2*p * 10 + p] = 0.0;
-    jacobian_vect[0][(2*p+1) * 10 + p] = 0.0;
+    EXPECT_EQ(+1.0, jacobian_vect[0][(2 * p + 1) * 10 + p]);
+    jacobian_vect[0][2 * p * 10 + p] = 0.0;
+    jacobian_vect[0][(2 * p + 1) * 10 + p] = 0.0;
   }
 
   // Check "C" Jacobian for first parameter block.
@@ -194,9 +193,8 @@
   jacobian.push_back(jacobian_vect[1].data());
 
   // Test jacobian computation.
-  EXPECT_TRUE(cost_function.Evaluate(parameter_blocks.data(),
-                                     residuals.data(),
-                                     jacobian.data()));
+  EXPECT_TRUE(cost_function.Evaluate(
+      parameter_blocks.data(), residuals.data(), jacobian.data()));
 
   for (int r = 0; r < 10; ++r) {
     EXPECT_EQ(-1.0 * r, residuals.at(r * 2));
@@ -214,7 +212,8 @@
   }
 }
 
-TEST(DynamicAutodiffCostFunctionTest, JacobianWithSecondParameterBlockConstant) {  // NOLINT
+TEST(DynamicAutodiffCostFunctionTest,
+     JacobianWithSecondParameterBlockConstant) {  // NOLINT
   // Test the residual counting.
   vector<double> param_block_0(10, 0.0);
   for (int i = 0; i < 10; ++i) {
@@ -244,9 +243,8 @@
   jacobian.push_back(NULL);
 
   // Test jacobian computation.
-  EXPECT_TRUE(cost_function.Evaluate(parameter_blocks.data(),
-                                     residuals.data(),
-                                     jacobian.data()));
+  EXPECT_TRUE(cost_function.Evaluate(
+      parameter_blocks.data(), residuals.data(), jacobian.data()));
 
   for (int r = 0; r < 10; ++r) {
     EXPECT_EQ(-1.0 * r, residuals.at(r * 2));
@@ -255,11 +253,11 @@
   EXPECT_EQ(420, residuals.at(20));
   for (int p = 0; p < 10; ++p) {
     // Check "A" Jacobian.
-    EXPECT_EQ(-1.0, jacobian_vect[0][2*p * 10 + p]);
+    EXPECT_EQ(-1.0, jacobian_vect[0][2 * p * 10 + p]);
     // Check "B" Jacobian.
-    EXPECT_EQ(+1.0, jacobian_vect[0][(2*p+1) * 10 + p]);
-    jacobian_vect[0][2*p * 10 + p] = 0.0;
-    jacobian_vect[0][(2*p+1) * 10 + p] = 0.0;
+    EXPECT_EQ(+1.0, jacobian_vect[0][(2 * p + 1) * 10 + p]);
+    jacobian_vect[0][2 * p * 10 + p] = 0.0;
+    jacobian_vect[0][(2 * p + 1) * 10 + p] = 0.0;
   }
 
   // Check "C" Jacobian for first parameter block.
@@ -330,10 +328,10 @@
 
     // Prepare the cost function.
     typedef DynamicAutoDiffCostFunction<MyThreeParameterCostFunctor, 3>
-      DynamicMyThreeParameterCostFunction;
-    DynamicMyThreeParameterCostFunction * cost_function =
-      new DynamicMyThreeParameterCostFunction(
-        new MyThreeParameterCostFunctor());
+        DynamicMyThreeParameterCostFunction;
+    DynamicMyThreeParameterCostFunction* cost_function =
+        new DynamicMyThreeParameterCostFunction(
+            new MyThreeParameterCostFunctor());
     cost_function->AddParameterBlock(1);
     cost_function->AddParameterBlock(2);
     cost_function->AddParameterBlock(3);
@@ -431,9 +429,8 @@
 
 TEST_F(ThreeParameterCostFunctorTest, TestThreeParameterResiduals) {
   vector<double> residuals(7, -100000);
-  EXPECT_TRUE(cost_function_->Evaluate(parameter_blocks_.data(),
-                                       residuals.data(),
-                                       NULL));
+  EXPECT_TRUE(cost_function_->Evaluate(
+      parameter_blocks_.data(), residuals.data(), NULL));
   for (int i = 0; i < 7; ++i) {
     EXPECT_EQ(expected_residuals_[i], residuals[i]);
   }
@@ -447,9 +444,8 @@
   jacobian.push_back(jacobian_vect_[1].data());
   jacobian.push_back(jacobian_vect_[2].data());
 
-  EXPECT_TRUE(cost_function_->Evaluate(parameter_blocks_.data(),
-                                       residuals.data(),
-                                       jacobian.data()));
+  EXPECT_TRUE(cost_function_->Evaluate(
+      parameter_blocks_.data(), residuals.data(), jacobian.data()));
 
   for (int i = 0; i < 7; ++i) {
     EXPECT_EQ(expected_residuals_[i], residuals[i]);
@@ -477,9 +473,8 @@
   jacobian.push_back(jacobian_vect_[1].data());
   jacobian.push_back(NULL);
 
-  EXPECT_TRUE(cost_function_->Evaluate(parameter_blocks_.data(),
-                                       residuals.data(),
-                                       jacobian.data()));
+  EXPECT_TRUE(cost_function_->Evaluate(
+      parameter_blocks_.data(), residuals.data(), jacobian.data()));
 
   for (int i = 0; i < 7; ++i) {
     EXPECT_EQ(expected_residuals_[i], residuals[i]);
@@ -499,9 +494,8 @@
   jacobian.push_back(NULL);
   jacobian.push_back(jacobian_vect_[2].data());
 
-  EXPECT_TRUE(cost_function_->Evaluate(parameter_blocks_.data(),
-                                       residuals.data(),
-                                       jacobian.data()));
+  EXPECT_TRUE(cost_function_->Evaluate(
+      parameter_blocks_.data(), residuals.data(), jacobian.data()));
 
   for (int i = 0; i < 7; ++i) {
     EXPECT_EQ(expected_residuals_[i], residuals[i]);
@@ -567,10 +561,9 @@
 
     // Prepare the cost function.
     typedef DynamicAutoDiffCostFunction<MySixParameterCostFunctor, 3>
-      DynamicMySixParameterCostFunction;
-    DynamicMySixParameterCostFunction * cost_function =
-      new DynamicMySixParameterCostFunction(
-        new MySixParameterCostFunctor());
+        DynamicMySixParameterCostFunction;
+    DynamicMySixParameterCostFunction* cost_function =
+        new DynamicMySixParameterCostFunction(new MySixParameterCostFunctor());
     for (int i = 0; i < 6; ++i) {
       cost_function->AddParameterBlock(1);
     }
@@ -675,9 +668,8 @@
 
 TEST_F(SixParameterCostFunctorTest, TestSixParameterResiduals) {
   vector<double> residuals(7, -100000);
-  EXPECT_TRUE(cost_function_->Evaluate(parameter_blocks_.data(),
-                                       residuals.data(),
-                                       NULL));
+  EXPECT_TRUE(cost_function_->Evaluate(
+      parameter_blocks_.data(), residuals.data(), NULL));
   for (int i = 0; i < 7; ++i) {
     EXPECT_EQ(expected_residuals_[i], residuals[i]);
   }
@@ -694,9 +686,8 @@
   jacobian.push_back(jacobian_vect_[4].data());
   jacobian.push_back(jacobian_vect_[5].data());
 
-  EXPECT_TRUE(cost_function_->Evaluate(parameter_blocks_.data(),
-                                       residuals.data(),
-                                       jacobian.data()));
+  EXPECT_TRUE(cost_function_->Evaluate(
+      parameter_blocks_.data(), residuals.data(), jacobian.data()));
 
   for (int i = 0; i < 7; ++i) {
     EXPECT_EQ(expected_residuals_[i], residuals[i]);
@@ -720,9 +711,8 @@
   jacobian.push_back(jacobian_vect_[4].data());
   jacobian.push_back(NULL);
 
-  EXPECT_TRUE(cost_function_->Evaluate(parameter_blocks_.data(),
-                                       residuals.data(),
-                                       jacobian.data()));
+  EXPECT_TRUE(cost_function_->Evaluate(
+      parameter_blocks_.data(), residuals.data(), jacobian.data()));
 
   for (int i = 0; i < 7; ++i) {
     EXPECT_EQ(expected_residuals_[i], residuals[i]);
@@ -751,9 +741,8 @@
   jacobian.push_back(NULL);
   jacobian.push_back(jacobian_vect_[5].data());
 
-  EXPECT_TRUE(cost_function_->Evaluate(parameter_blocks_.data(),
-                                       residuals.data(),
-                                       jacobian.data()));
+  EXPECT_TRUE(cost_function_->Evaluate(
+      parameter_blocks_.data(), residuals.data(), jacobian.data()));
 
   for (int i = 0; i < 7; ++i) {
     EXPECT_EQ(expected_residuals_[i], residuals[i]);
diff --git a/internal/ceres/dynamic_compressed_row_finalizer.h b/internal/ceres/dynamic_compressed_row_finalizer.h
index a25a308..30c98d8 100644
--- a/internal/ceres/dynamic_compressed_row_finalizer.h
+++ b/internal/ceres/dynamic_compressed_row_finalizer.h
@@ -40,7 +40,7 @@
 struct DynamicCompressedRowJacobianFinalizer {
   void operator()(SparseMatrix* base_jacobian, int num_parameters) {
     DynamicCompressedRowSparseMatrix* jacobian =
-      down_cast<DynamicCompressedRowSparseMatrix*>(base_jacobian);
+        down_cast<DynamicCompressedRowSparseMatrix*>(base_jacobian);
     jacobian->Finalize(num_parameters);
   }
 };
diff --git a/internal/ceres/dynamic_compressed_row_jacobian_writer.h b/internal/ceres/dynamic_compressed_row_jacobian_writer.h
index 6e5ac38..ef8fa25 100644
--- a/internal/ceres/dynamic_compressed_row_jacobian_writer.h
+++ b/internal/ceres/dynamic_compressed_row_jacobian_writer.h
@@ -47,8 +47,7 @@
  public:
   DynamicCompressedRowJacobianWriter(Evaluator::Options /* ignored */,
                                      Program* program)
-    : program_(program) {
-  }
+      : program_(program) {}
 
   // JacobianWriter interface.
 
@@ -70,7 +69,7 @@
   // This method is thread-safe over residual blocks (each `residual_id`).
   void Write(int residual_id,
              int residual_offset,
-             double **jacobians,
+             double** jacobians,
              SparseMatrix* base_jacobian);
 
  private:
diff --git a/internal/ceres/dynamic_compressed_row_sparse_matrix.cc b/internal/ceres/dynamic_compressed_row_sparse_matrix.cc
index f020768..936e682 100644
--- a/internal/ceres/dynamic_compressed_row_sparse_matrix.cc
+++ b/internal/ceres/dynamic_compressed_row_sparse_matrix.cc
@@ -28,22 +28,19 @@
 //
 // Author: richie.stebbing@gmail.com (Richard Stebbing)
 
-#include <cstring>
 #include "ceres/dynamic_compressed_row_sparse_matrix.h"
 
+#include <cstring>
+
 namespace ceres {
 namespace internal {
 
 DynamicCompressedRowSparseMatrix::DynamicCompressedRowSparseMatrix(
-  int num_rows,
-  int num_cols,
-  int initial_max_num_nonzeros)
-    : CompressedRowSparseMatrix(num_rows,
-                                num_cols,
-                                initial_max_num_nonzeros) {
-    dynamic_cols_.resize(num_rows);
-    dynamic_values_.resize(num_rows);
-  }
+    int num_rows, int num_cols, int initial_max_num_nonzeros)
+    : CompressedRowSparseMatrix(num_rows, num_cols, initial_max_num_nonzeros) {
+  dynamic_cols_.resize(num_rows);
+  dynamic_values_.resize(num_rows);
+}
 
 void DynamicCompressedRowSparseMatrix::InsertEntry(int row,
                                                    int col,
@@ -56,8 +53,7 @@
   dynamic_values_[row].push_back(value);
 }
 
-void DynamicCompressedRowSparseMatrix::ClearRows(int row_start,
-                                                 int num_rows) {
+void DynamicCompressedRowSparseMatrix::ClearRows(int row_start, int num_rows) {
   for (int r = 0; r < num_rows; ++r) {
     const int i = row_start + r;
     CHECK_GE(i, 0);
@@ -99,8 +95,8 @@
   mutable_rows()[num_rows()] = index_into_values_and_cols;
 
   CHECK_EQ(index_into_values_and_cols, num_jacobian_nonzeros)
-    << "Ceres bug: final index into values_ and cols_ should be equal to "
-    << "the number of jacobian nonzeros. Please contact the developers!";
+      << "Ceres bug: final index into values_ and cols_ should be equal to "
+      << "the number of jacobian nonzeros. Please contact the developers!";
 }
 
 }  // namespace internal
diff --git a/internal/ceres/dynamic_compressed_row_sparse_matrix_test.cc b/internal/ceres/dynamic_compressed_row_sparse_matrix_test.cc
index 25b51ce..95dc807 100644
--- a/internal/ceres/dynamic_compressed_row_sparse_matrix_test.cc
+++ b/internal/ceres/dynamic_compressed_row_sparse_matrix_test.cc
@@ -31,6 +31,7 @@
 #include "ceres/dynamic_compressed_row_sparse_matrix.h"
 
 #include <memory>
+
 #include "ceres/casts.h"
 #include "ceres/compressed_row_sparse_matrix.h"
 #include "ceres/internal/eigen.h"
@@ -60,14 +61,10 @@
     InitialiseDenseReference();
     InitialiseSparseMatrixReferences();
 
-    dcrsm.reset(new DynamicCompressedRowSparseMatrix(num_rows,
-                                                     num_cols,
-                                                     0));
+    dcrsm.reset(new DynamicCompressedRowSparseMatrix(num_rows, num_cols, 0));
   }
 
-  void Finalize() {
-    dcrsm->Finalize(num_additional_elements);
-  }
+  void Finalize() { dcrsm->Finalize(num_additional_elements); }
 
   void InitialiseDenseReference() {
     dense.resize(num_rows, num_cols);
@@ -96,9 +93,8 @@
     }
     ASSERT_EQ(values.size(), expected_num_nonzeros);
 
-    tsm.reset(new TripletSparseMatrix(num_rows,
-                                      num_cols,
-                                      expected_num_nonzeros));
+    tsm.reset(
+        new TripletSparseMatrix(num_rows, num_cols, expected_num_nonzeros));
     copy(rows.begin(), rows.end(), tsm->mutable_rows());
     copy(cols.begin(), cols.end(), tsm->mutable_cols());
     copy(values.begin(), values.end(), tsm->mutable_values());
diff --git a/internal/ceres/dynamic_numeric_diff_cost_function_test.cc b/internal/ceres/dynamic_numeric_diff_cost_function_test.cc
index d86cc95..0150f5e 100644
--- a/internal/ceres/dynamic_numeric_diff_cost_function_test.cc
+++ b/internal/ceres/dynamic_numeric_diff_cost_function_test.cc
@@ -29,10 +29,11 @@
 // Author: sameeragarwal@google.com (Sameer Agarwal)
 //         mierle@gmail.com (Keir Mierle)
 
-#include <cstddef>
-
-#include <memory>
 #include "ceres/dynamic_numeric_diff_cost_function.h"
+
+#include <cstddef>
+#include <memory>
+
 #include "gtest/gtest.h"
 
 namespace ceres {
@@ -87,9 +88,8 @@
   vector<double*> parameter_blocks(2);
   parameter_blocks[0] = &param_block_0[0];
   parameter_blocks[1] = &param_block_1[0];
-  EXPECT_TRUE(cost_function.Evaluate(&parameter_blocks[0],
-                                     residuals.data(),
-                                     NULL));
+  EXPECT_TRUE(
+      cost_function.Evaluate(&parameter_blocks[0], residuals.data(), NULL));
   for (int r = 0; r < 10; ++r) {
     EXPECT_EQ(1.0 * r, residuals.at(r * 2));
     EXPECT_EQ(-1.0 * r, residuals.at(r * 2 + 1));
@@ -97,7 +97,6 @@
   EXPECT_EQ(0, residuals.at(20));
 }
 
-
 TEST(DynamicNumericdiffCostFunctionTest, TestJacobian) {
   // Test the residual counting.
   vector<double> param_block_0(10, 0.0);
@@ -128,9 +127,8 @@
   jacobian.push_back(jacobian_vect[1].data());
 
   // Test jacobian computation.
-  EXPECT_TRUE(cost_function.Evaluate(parameter_blocks.data(),
-                                     residuals.data(),
-                                     jacobian.data()));
+  EXPECT_TRUE(cost_function.Evaluate(
+      parameter_blocks.data(), residuals.data(), jacobian.data()));
 
   for (int r = 0; r < 10; ++r) {
     EXPECT_EQ(-1.0 * r, residuals.at(r * 2));
@@ -139,11 +137,11 @@
   EXPECT_EQ(420, residuals.at(20));
   for (int p = 0; p < 10; ++p) {
     // Check "A" Jacobian.
-    EXPECT_NEAR(-1.0, jacobian_vect[0][2*p * 10 + p], kTolerance);
+    EXPECT_NEAR(-1.0, jacobian_vect[0][2 * p * 10 + p], kTolerance);
     // Check "B" Jacobian.
-    EXPECT_NEAR(+1.0, jacobian_vect[0][(2*p+1) * 10 + p], kTolerance);
-    jacobian_vect[0][2*p * 10 + p] = 0.0;
-    jacobian_vect[0][(2*p+1) * 10 + p] = 0.0;
+    EXPECT_NEAR(+1.0, jacobian_vect[0][(2 * p + 1) * 10 + p], kTolerance);
+    jacobian_vect[0][2 * p * 10 + p] = 0.0;
+    jacobian_vect[0][(2 * p + 1) * 10 + p] = 0.0;
   }
 
   // Check "C" Jacobian for first parameter block.
@@ -165,7 +163,8 @@
   }
 }
 
-TEST(DynamicNumericdiffCostFunctionTest, JacobianWithFirstParameterBlockConstant) {  // NOLINT
+TEST(DynamicNumericdiffCostFunctionTest,
+     JacobianWithFirstParameterBlockConstant) {  // NOLINT
   // Test the residual counting.
   vector<double> param_block_0(10, 0.0);
   for (int i = 0; i < 10; ++i) {
@@ -195,9 +194,8 @@
   jacobian.push_back(jacobian_vect[1].data());
 
   // Test jacobian computation.
-  EXPECT_TRUE(cost_function.Evaluate(parameter_blocks.data(),
-                                     residuals.data(),
-                                     jacobian.data()));
+  EXPECT_TRUE(cost_function.Evaluate(
+      parameter_blocks.data(), residuals.data(), jacobian.data()));
 
   for (int r = 0; r < 10; ++r) {
     EXPECT_EQ(-1.0 * r, residuals.at(r * 2));
@@ -215,7 +213,8 @@
   }
 }
 
-TEST(DynamicNumericdiffCostFunctionTest, JacobianWithSecondParameterBlockConstant) {  // NOLINT
+TEST(DynamicNumericdiffCostFunctionTest,
+     JacobianWithSecondParameterBlockConstant) {  // NOLINT
   // Test the residual counting.
   vector<double> param_block_0(10, 0.0);
   for (int i = 0; i < 10; ++i) {
@@ -245,9 +244,8 @@
   jacobian.push_back(NULL);
 
   // Test jacobian computation.
-  EXPECT_TRUE(cost_function.Evaluate(parameter_blocks.data(),
-                                     residuals.data(),
-                                     jacobian.data()));
+  EXPECT_TRUE(cost_function.Evaluate(
+      parameter_blocks.data(), residuals.data(), jacobian.data()));
 
   for (int r = 0; r < 10; ++r) {
     EXPECT_EQ(-1.0 * r, residuals.at(r * 2));
@@ -256,11 +254,11 @@
   EXPECT_EQ(420, residuals.at(20));
   for (int p = 0; p < 10; ++p) {
     // Check "A" Jacobian.
-    EXPECT_NEAR(-1.0, jacobian_vect[0][2*p * 10 + p], kTolerance);
+    EXPECT_NEAR(-1.0, jacobian_vect[0][2 * p * 10 + p], kTolerance);
     // Check "B" Jacobian.
-    EXPECT_NEAR(+1.0, jacobian_vect[0][(2*p+1) * 10 + p], kTolerance);
-    jacobian_vect[0][2*p * 10 + p] = 0.0;
-    jacobian_vect[0][(2*p+1) * 10 + p] = 0.0;
+    EXPECT_NEAR(+1.0, jacobian_vect[0][(2 * p + 1) * 10 + p], kTolerance);
+    jacobian_vect[0][2 * p * 10 + p] = 0.0;
+    jacobian_vect[0][(2 * p + 1) * 10 + p] = 0.0;
   }
 
   // Check "C" Jacobian for first parameter block.
@@ -331,10 +329,10 @@
 
     // Prepare the cost function.
     typedef DynamicNumericDiffCostFunction<MyThreeParameterCostFunctor>
-      DynamicMyThreeParameterCostFunction;
-    DynamicMyThreeParameterCostFunction * cost_function =
-      new DynamicMyThreeParameterCostFunction(
-        new MyThreeParameterCostFunctor());
+        DynamicMyThreeParameterCostFunction;
+    DynamicMyThreeParameterCostFunction* cost_function =
+        new DynamicMyThreeParameterCostFunction(
+            new MyThreeParameterCostFunctor());
     cost_function->AddParameterBlock(1);
     cost_function->AddParameterBlock(2);
     cost_function->AddParameterBlock(3);
@@ -432,9 +430,8 @@
 
 TEST_F(ThreeParameterCostFunctorTest, TestThreeParameterResiduals) {
   vector<double> residuals(7, -100000);
-  EXPECT_TRUE(cost_function_->Evaluate(parameter_blocks_.data(),
-                                       residuals.data(),
-                                       NULL));
+  EXPECT_TRUE(cost_function_->Evaluate(
+      parameter_blocks_.data(), residuals.data(), NULL));
   for (int i = 0; i < 7; ++i) {
     EXPECT_EQ(expected_residuals_[i], residuals[i]);
   }
@@ -448,9 +445,8 @@
   jacobian.push_back(jacobian_vect_[1].data());
   jacobian.push_back(jacobian_vect_[2].data());
 
-  EXPECT_TRUE(cost_function_->Evaluate(parameter_blocks_.data(),
-                                       residuals.data(),
-                                       jacobian.data()));
+  EXPECT_TRUE(cost_function_->Evaluate(
+      parameter_blocks_.data(), residuals.data(), jacobian.data()));
 
   for (int i = 0; i < 7; ++i) {
     EXPECT_EQ(expected_residuals_[i], residuals[i]);
@@ -478,9 +474,8 @@
   jacobian.push_back(jacobian_vect_[1].data());
   jacobian.push_back(NULL);
 
-  EXPECT_TRUE(cost_function_->Evaluate(parameter_blocks_.data(),
-                                       residuals.data(),
-                                       jacobian.data()));
+  EXPECT_TRUE(cost_function_->Evaluate(
+      parameter_blocks_.data(), residuals.data(), jacobian.data()));
 
   for (int i = 0; i < 7; ++i) {
     EXPECT_EQ(expected_residuals_[i], residuals[i]);
@@ -500,9 +495,8 @@
   jacobian.push_back(NULL);
   jacobian.push_back(jacobian_vect_[2].data());
 
-  EXPECT_TRUE(cost_function_->Evaluate(parameter_blocks_.data(),
-                                       residuals.data(),
-                                       jacobian.data()));
+  EXPECT_TRUE(cost_function_->Evaluate(
+      parameter_blocks_.data(), residuals.data(), jacobian.data()));
 
   for (int i = 0; i < 7; ++i) {
     EXPECT_EQ(expected_residuals_[i], residuals[i]);
diff --git a/internal/ceres/dynamic_sparse_normal_cholesky_solver.cc b/internal/ceres/dynamic_sparse_normal_cholesky_solver.cc
index 25d5417..d31c422 100644
--- a/internal/ceres/dynamic_sparse_normal_cholesky_solver.cc
+++ b/internal/ceres/dynamic_sparse_normal_cholesky_solver.cc
@@ -95,7 +95,7 @@
       LOG(FATAL) << "Unsupported sparse linear algebra library for "
                  << "dynamic sparsity: "
                  << SparseLinearAlgebraLibraryTypeToString(
-                     options_.sparse_linear_algebra_library_type);
+                        options_.sparse_linear_algebra_library_type);
   }
 
   if (per_solve_options.D != nullptr) {
diff --git a/internal/ceres/dynamic_sparse_normal_cholesky_solver.h b/internal/ceres/dynamic_sparse_normal_cholesky_solver.h
index 4e31c7a..36118ba 100644
--- a/internal/ceres/dynamic_sparse_normal_cholesky_solver.h
+++ b/internal/ceres/dynamic_sparse_normal_cholesky_solver.h
@@ -35,7 +35,9 @@
 #define CERES_INTERNAL_DYNAMIC_SPARSE_NORMAL_CHOLESKY_SOLVER_H_
 
 // This include must come before any #ifndef check on Ceres compile options.
+// clang-format off
 #include "ceres/internal/port.h"
+// clang-format on
 
 #include "ceres/linear_solver.h"
 
@@ -59,23 +61,19 @@
   virtual ~DynamicSparseNormalCholeskySolver() {}
 
  private:
-  LinearSolver::Summary SolveImpl(
-      CompressedRowSparseMatrix* A,
-      const double* b,
-      const LinearSolver::PerSolveOptions& options,
-      double* x) final;
+  LinearSolver::Summary SolveImpl(CompressedRowSparseMatrix* A,
+                                  const double* b,
+                                  const LinearSolver::PerSolveOptions& options,
+                                  double* x) final;
 
-  LinearSolver::Summary SolveImplUsingSuiteSparse(
-      CompressedRowSparseMatrix* A,
-      double* rhs_and_solution);
+  LinearSolver::Summary SolveImplUsingSuiteSparse(CompressedRowSparseMatrix* A,
+                                                  double* rhs_and_solution);
 
-  LinearSolver::Summary SolveImplUsingCXSparse(
-      CompressedRowSparseMatrix* A,
-      double* rhs_and_solution);
+  LinearSolver::Summary SolveImplUsingCXSparse(CompressedRowSparseMatrix* A,
+                                               double* rhs_and_solution);
 
-  LinearSolver::Summary SolveImplUsingEigen(
-      CompressedRowSparseMatrix* A,
-      double* rhs_and_solution);
+  LinearSolver::Summary SolveImplUsingEigen(CompressedRowSparseMatrix* A,
+                                            double* rhs_and_solution);
 
   const LinearSolver::Options options_;
 };
diff --git a/internal/ceres/dynamic_sparse_normal_cholesky_solver_test.cc b/internal/ceres/dynamic_sparse_normal_cholesky_solver_test.cc
index 3d0d46d..8bf609e 100644
--- a/internal/ceres/dynamic_sparse_normal_cholesky_solver_test.cc
+++ b/internal/ceres/dynamic_sparse_normal_cholesky_solver_test.cc
@@ -29,6 +29,8 @@
 // Author: sameeragarwal@google.com (Sameer Agarwal)
 
 #include <memory>
+
+#include "Eigen/Cholesky"
 #include "ceres/casts.h"
 #include "ceres/compressed_row_sparse_matrix.h"
 #include "ceres/context_impl.h"
@@ -39,8 +41,6 @@
 #include "glog/logging.h"
 #include "gtest/gtest.h"
 
-#include "Eigen/Cholesky"
-
 namespace ceres {
 namespace internal {
 
diff --git a/internal/ceres/dynamic_sparsity_test.cc b/internal/ceres/dynamic_sparsity_test.cc
index 94ed173..12e62ef 100644
--- a/internal/ceres/dynamic_sparsity_test.cc
+++ b/internal/ceres/dynamic_sparsity_test.cc
@@ -33,6 +33,7 @@
 
 #include <cmath>
 #include <vector>
+
 #include "ceres/ceres.h"
 #include "glog/logging.h"
 #include "gtest/gtest.h"
@@ -53,6 +54,7 @@
 
 const int kYRows = 212;
 const int kYCols = 2;
+// clang-format off
 const double kYData[kYRows * kYCols] = {
   +3.871364e+00, +9.916027e-01,
   +3.864003e+00, +1.034148e+00,
@@ -267,6 +269,7 @@
   +3.870542e+00, +9.996121e-01,
   +3.865424e+00, +1.028474e+00
 };
+// clang-format on
 
 ConstMatrixRef kY(kYData, kYRows, kYCols);
 
@@ -327,7 +330,8 @@
     return true;
   }
 
-  static CostFunction* Create(const int num_segments, const Eigen::Vector2d& y) {
+  static CostFunction* Create(const int num_segments,
+                              const Eigen::Vector2d& y) {
     return new PointToLineSegmentContourCostFunction(num_segments, y);
   }
 
diff --git a/internal/ceres/eigensparse.h b/internal/ceres/eigensparse.h
index 2e6c6f0..bb89c2c 100644
--- a/internal/ceres/eigensparse.h
+++ b/internal/ceres/eigensparse.h
@@ -56,8 +56,8 @@
 
   // SparseCholesky interface.
   virtual ~EigenSparseCholesky();
-  virtual LinearSolverTerminationType Factorize(
-      CompressedRowSparseMatrix* lhs, std::string* message) = 0;
+  virtual LinearSolverTerminationType Factorize(CompressedRowSparseMatrix* lhs,
+                                                std::string* message) = 0;
   virtual CompressedRowSparseMatrix::StorageType StorageType() const = 0;
   virtual LinearSolverTerminationType Solve(const double* rhs,
                                             double* solution,
@@ -74,8 +74,8 @@
 
   // SparseCholesky interface.
   virtual ~FloatEigenSparseCholesky();
-  virtual LinearSolverTerminationType Factorize(
-      CompressedRowSparseMatrix* lhs, std::string* message) = 0;
+  virtual LinearSolverTerminationType Factorize(CompressedRowSparseMatrix* lhs,
+                                                std::string* message) = 0;
   virtual CompressedRowSparseMatrix::StorageType StorageType() const = 0;
   virtual LinearSolverTerminationType Solve(const double* rhs,
                                             double* solution,
diff --git a/internal/ceres/evaluation_callback_test.cc b/internal/ceres/evaluation_callback_test.cc
index ccca942..0ca2625 100644
--- a/internal/ceres/evaluation_callback_test.cc
+++ b/internal/ceres/evaluation_callback_test.cc
@@ -34,10 +34,10 @@
 #include <limits>
 #include <vector>
 
+#include "ceres/autodiff_cost_function.h"
 #include "ceres/problem.h"
 #include "ceres/problem_impl.h"
 #include "ceres/sized_cost_function.h"
-#include "ceres/autodiff_cost_function.h"
 #include "ceres/solver.h"
 #include "gtest/gtest.h"
 
@@ -249,8 +249,8 @@
  public:
   void PrepareForEvaluation(bool evaluate_jacobians,
                             bool new_evaluation_point) final {
-    (void) evaluate_jacobians;
-    (void) new_evaluation_point;
+    (void)evaluate_jacobians;
+    (void)new_evaluation_point;
     counter_ += 1.0;
   }
 
diff --git a/internal/ceres/evaluator.cc b/internal/ceres/evaluator.cc
index 8387983..5168741 100644
--- a/internal/ceres/evaluator.cc
+++ b/internal/ceres/evaluator.cc
@@ -28,7 +28,10 @@
 //
 // Author: keir@google.com (Keir Mierle)
 
+#include "ceres/evaluator.h"
+
 #include <vector>
+
 #include "ceres/block_evaluate_preparer.h"
 #include "ceres/block_jacobian_writer.h"
 #include "ceres/compressed_row_jacobian_writer.h"
@@ -37,7 +40,6 @@
 #include "ceres/dense_jacobian_writer.h"
 #include "ceres/dynamic_compressed_row_finalizer.h"
 #include "ceres/dynamic_compressed_row_jacobian_writer.h"
-#include "ceres/evaluator.h"
 #include "ceres/internal/port.h"
 #include "ceres/program_evaluator.h"
 #include "ceres/scratch_evaluate_preparer.h"
@@ -56,26 +58,23 @@
   switch (options.linear_solver_type) {
     case DENSE_QR:
     case DENSE_NORMAL_CHOLESKY:
-      return new ProgramEvaluator<ScratchEvaluatePreparer,
-                                  DenseJacobianWriter>(options,
-                                                       program);
+      return new ProgramEvaluator<ScratchEvaluatePreparer, DenseJacobianWriter>(
+          options, program);
     case DENSE_SCHUR:
     case SPARSE_SCHUR:
     case ITERATIVE_SCHUR:
     case CGNR:
-      return new ProgramEvaluator<BlockEvaluatePreparer,
-                                  BlockJacobianWriter>(options,
-                                                       program);
+      return new ProgramEvaluator<BlockEvaluatePreparer, BlockJacobianWriter>(
+          options, program);
     case SPARSE_NORMAL_CHOLESKY:
       if (options.dynamic_sparsity) {
         return new ProgramEvaluator<ScratchEvaluatePreparer,
                                     DynamicCompressedRowJacobianWriter,
                                     DynamicCompressedRowJacobianFinalizer>(
-                                        options, program);
+            options, program);
       } else {
-        return new ProgramEvaluator<BlockEvaluatePreparer,
-                                    BlockJacobianWriter>(options,
-                                                         program);
+        return new ProgramEvaluator<BlockEvaluatePreparer, BlockJacobianWriter>(
+            options, program);
       }
 
     default:
diff --git a/internal/ceres/evaluator.h b/internal/ceres/evaluator.h
index b820958..a668445 100644
--- a/internal/ceres/evaluator.h
+++ b/internal/ceres/evaluator.h
@@ -124,12 +124,8 @@
                 double* residuals,
                 double* gradient,
                 SparseMatrix* jacobian) {
-    return Evaluate(EvaluateOptions(),
-                    state,
-                    cost,
-                    residuals,
-                    gradient,
-                    jacobian);
+    return Evaluate(
+        EvaluateOptions(), state, cost, residuals, gradient, jacobian);
   }
 
   // Make a change delta (of size NumEffectiveParameters()) to state (of size
@@ -152,7 +148,7 @@
 
   // This is the effective number of parameters that the optimizer may adjust.
   // This applies when there are parameterizations on some of the parameters.
-  virtual int NumEffectiveParameters()  const = 0;
+  virtual int NumEffectiveParameters() const = 0;
 
   // The number of residuals in the optimization problem.
   virtual int NumResiduals() const = 0;
diff --git a/internal/ceres/evaluator_test.cc b/internal/ceres/evaluator_test.cc
index 4f023d1..5ddb733 100644
--- a/internal/ceres/evaluator_test.cc
+++ b/internal/ceres/evaluator_test.cc
@@ -34,6 +34,7 @@
 #include "ceres/evaluator.h"
 
 #include <memory>
+
 #include "ceres/casts.h"
 #include "ceres/cost_function.h"
 #include "ceres/crs_matrix.h"
@@ -105,17 +106,16 @@
   EvaluatorTestOptions(LinearSolverType linear_solver_type,
                        int num_eliminate_blocks,
                        bool dynamic_sparsity = false)
-    : linear_solver_type(linear_solver_type),
-      num_eliminate_blocks(num_eliminate_blocks),
-      dynamic_sparsity(dynamic_sparsity) {}
+      : linear_solver_type(linear_solver_type),
+        num_eliminate_blocks(num_eliminate_blocks),
+        dynamic_sparsity(dynamic_sparsity) {}
 
   LinearSolverType linear_solver_type;
   int num_eliminate_blocks;
   bool dynamic_sparsity;
 };
 
-struct EvaluatorTest
-    : public ::testing::TestWithParam<EvaluatorTestOptions> {
+struct EvaluatorTest : public ::testing::TestWithParam<EvaluatorTestOptions> {
   Evaluator* CreateEvaluator(Program* program) {
     // This program is straight from the ProblemImpl, and so has no index/offset
     // yet; compute it here as required by the evaluator implementations.
@@ -123,13 +123,15 @@
 
     if (VLOG_IS_ON(1)) {
       string report;
-      StringAppendF(&report, "Creating evaluator with type: %d",
+      StringAppendF(&report,
+                    "Creating evaluator with type: %d",
                     GetParam().linear_solver_type);
       if (GetParam().linear_solver_type == SPARSE_NORMAL_CHOLESKY) {
-        StringAppendF(&report, ", dynamic_sparsity: %d",
-                      GetParam().dynamic_sparsity);
+        StringAppendF(
+            &report, ", dynamic_sparsity: %d", GetParam().dynamic_sparsity);
       }
-      StringAppendF(&report, " and num_eliminate_blocks: %d",
+      StringAppendF(&report,
+                    " and num_eliminate_blocks: %d",
                     GetParam().num_eliminate_blocks);
       VLOG(1) << report;
     }
@@ -142,7 +144,7 @@
     return Evaluator::Create(options, program, &error);
   }
 
-  void EvaluateAndCompare(ProblemImpl *problem,
+  void EvaluateAndCompare(ProblemImpl* problem,
                           int expected_num_rows,
                           int expected_num_cols,
                           double expected_cost,
@@ -171,12 +173,14 @@
 
     vector<double> state(evaluator->NumParameters());
 
+    // clang-format off
     ASSERT_TRUE(evaluator->Evaluate(
           &state[0],
           &cost,
           expected_residuals != nullptr ? &residuals[0]  : nullptr,
           expected_gradient  != nullptr ? &gradient[0]   : nullptr,
           expected_jacobian  != nullptr ? jacobian.get() : nullptr));
+    // clang-format on
 
     Matrix actual_jacobian;
     if (expected_jacobian != nullptr) {
@@ -196,15 +200,15 @@
   }
 
   // Try all combinations of parameters for the evaluator.
-  void CheckAllEvaluationCombinations(const ExpectedEvaluation &expected) {
+  void CheckAllEvaluationCombinations(const ExpectedEvaluation& expected) {
     for (int i = 0; i < 8; ++i) {
       EvaluateAndCompare(&problem,
                          expected.num_rows,
                          expected.num_cols,
                          expected.cost,
                          (i & 1) ? expected.residuals : nullptr,
-                         (i & 2) ? expected.gradient  : nullptr,
-                         (i & 4) ? expected.jacobian  : nullptr);
+                         (i & 2) ? expected.gradient : nullptr,
+                         (i & 4) ? expected.jacobian : nullptr);
     }
   }
 
@@ -217,15 +221,15 @@
 };
 
 static void SetSparseMatrixConstant(SparseMatrix* sparse_matrix, double value) {
-  VectorRef(sparse_matrix->mutable_values(),
-            sparse_matrix->num_nonzeros()).setConstant(value);
+  VectorRef(sparse_matrix->mutable_values(), sparse_matrix->num_nonzeros())
+      .setConstant(value);
 }
 
 TEST_P(EvaluatorTest, SingleResidualProblem) {
-  problem.AddResidualBlock(new ParameterIgnoringCostFunction<1, 3, 2, 3, 4>,
-                           nullptr,
-                           x, y, z);
+  problem.AddResidualBlock(
+      new ParameterIgnoringCostFunction<1, 3, 2, 3, 4>, nullptr, x, y, z);
 
+  // clang-format off
   ExpectedEvaluation expected = {
     // Rows/columns
     3, 9,
@@ -245,14 +249,15 @@
       1, 2,   1, 2, 3,   1, 2, 3, 4
     }
   };
+  // clang-format on
   CheckAllEvaluationCombinations(expected);
 }
 
 TEST_P(EvaluatorTest, SingleResidualProblemWithPermutedParameters) {
   // Add the parameters in explicit order to force the ordering in the program.
-  problem.AddParameterBlock(x,  2);
-  problem.AddParameterBlock(y,  3);
-  problem.AddParameterBlock(z,  4);
+  problem.AddParameterBlock(x, 2);
+  problem.AddParameterBlock(y, 3);
+  problem.AddParameterBlock(z, 4);
 
   // Then use a cost function which is similar to the others, but swap around
   // the ordering of the parameters to the cost function. This shouldn't affect
@@ -260,10 +265,10 @@
   // At one point the compressed row evaluator had a bug that went undetected
   // for a long time, since by chance most users added parameters to the problem
   // in the same order that they occurred as parameters to a cost function.
-  problem.AddResidualBlock(new ParameterIgnoringCostFunction<1, 3, 4, 3, 2>,
-                           nullptr,
-                           z, y, x);
+  problem.AddResidualBlock(
+      new ParameterIgnoringCostFunction<1, 3, 4, 3, 2>, nullptr, z, y, x);
 
+  // clang-format off
   ExpectedEvaluation expected = {
     // Rows/columns
     3, 9,
@@ -283,6 +288,7 @@
       1, 2,   1, 2, 3,   1, 2, 3, 4
     }
   };
+  // clang-format on
   CheckAllEvaluationCombinations(expected);
 }
 
@@ -303,10 +309,10 @@
   problem.AddParameterBlock(z, 4);
   problem.AddParameterBlock(d, 3);
 
-  problem.AddResidualBlock(new ParameterIgnoringCostFunction<1, 3, 2, 3, 4>,
-                           nullptr,
-                           x, y, z);
+  problem.AddResidualBlock(
+      new ParameterIgnoringCostFunction<1, 3, 2, 3, 4>, nullptr, x, y, z);
 
+  // clang-format off
   ExpectedEvaluation expected = {
     // Rows/columns
     3, 16,
@@ -330,30 +336,29 @@
       0, 0,    1, 2,    0,    1, 2, 3,    0,    1, 2, 3, 4,    0, 0, 0
     }
   };
+  // clang-format on
   CheckAllEvaluationCombinations(expected);
 }
 
 TEST_P(EvaluatorTest, MultipleResidualProblem) {
   // Add the parameters in explicit order to force the ordering in the program.
-  problem.AddParameterBlock(x,  2);
-  problem.AddParameterBlock(y,  3);
-  problem.AddParameterBlock(z,  4);
+  problem.AddParameterBlock(x, 2);
+  problem.AddParameterBlock(y, 3);
+  problem.AddParameterBlock(z, 4);
 
   // f(x, y) in R^2
-  problem.AddResidualBlock(new ParameterIgnoringCostFunction<1, 2, 2, 3>,
-                           nullptr,
-                           x, y);
+  problem.AddResidualBlock(
+      new ParameterIgnoringCostFunction<1, 2, 2, 3>, nullptr, x, y);
 
   // g(x, z) in R^3
-  problem.AddResidualBlock(new ParameterIgnoringCostFunction<2, 3, 2, 4>,
-                           nullptr,
-                           x, z);
+  problem.AddResidualBlock(
+      new ParameterIgnoringCostFunction<2, 3, 2, 4>, nullptr, x, z);
 
   // h(y, z) in R^4
-  problem.AddResidualBlock(new ParameterIgnoringCostFunction<3, 4, 3, 4>,
-                           nullptr,
-                           y, z);
+  problem.AddResidualBlock(
+      new ParameterIgnoringCostFunction<3, 4, 3, 4>, nullptr, y, z);
 
+  // clang-format off
   ExpectedEvaluation expected = {
     // Rows/columns
     9, 9,
@@ -385,12 +390,13 @@
                       0, 0,    3, 6, 9,    3, 6, 9, 12
     }
   };
+  // clang-format on
   CheckAllEvaluationCombinations(expected);
 }
 
 TEST_P(EvaluatorTest, MultipleResidualsWithLocalParameterizations) {
   // Add the parameters in explicit order to force the ordering in the program.
-  problem.AddParameterBlock(x,  2);
+  problem.AddParameterBlock(x, 2);
 
   // Fix y's first dimension.
   vector<int> y_fixed;
@@ -403,20 +409,18 @@
   problem.AddParameterBlock(z, 4, new SubsetParameterization(4, z_fixed));
 
   // f(x, y) in R^2
-  problem.AddResidualBlock(new ParameterIgnoringCostFunction<1, 2, 2, 3>,
-                           nullptr,
-                           x, y);
+  problem.AddResidualBlock(
+      new ParameterIgnoringCostFunction<1, 2, 2, 3>, nullptr, x, y);
 
   // g(x, z) in R^3
-  problem.AddResidualBlock(new ParameterIgnoringCostFunction<2, 3, 2, 4>,
-                           nullptr,
-                           x, z);
+  problem.AddResidualBlock(
+      new ParameterIgnoringCostFunction<2, 3, 2, 4>, nullptr, x, z);
 
   // h(y, z) in R^4
-  problem.AddResidualBlock(new ParameterIgnoringCostFunction<3, 4, 3, 4>,
-                           nullptr,
-                           y, z);
+  problem.AddResidualBlock(
+      new ParameterIgnoringCostFunction<3, 4, 3, 4>, nullptr, y, z);
 
+  // clang-format off
   ExpectedEvaluation expected = {
     // Rows/columns
     9, 7,
@@ -448,6 +452,7 @@
                       0, 0,    6, 9,    3, 9, 12
     }
   };
+  // clang-format on
   CheckAllEvaluationCombinations(expected);
 }
 
@@ -458,24 +463,21 @@
   double z[4];
 
   // Add the parameters in explicit order to force the ordering in the program.
-  problem.AddParameterBlock(x,  2);
-  problem.AddParameterBlock(y,  3);
-  problem.AddParameterBlock(z,  4);
+  problem.AddParameterBlock(x, 2);
+  problem.AddParameterBlock(y, 3);
+  problem.AddParameterBlock(z, 4);
 
   // f(x, y) in R^2
- problem.AddResidualBlock(new ParameterIgnoringCostFunction<1, 2, 2, 3>,
-                          nullptr,
-                          x, y);
+  problem.AddResidualBlock(
+      new ParameterIgnoringCostFunction<1, 2, 2, 3>, nullptr, x, y);
 
   // g(x, z) in R^3
- problem.AddResidualBlock(new ParameterIgnoringCostFunction<2, 3, 2, 4>,
-                          nullptr,
-                          x, z);
+  problem.AddResidualBlock(
+      new ParameterIgnoringCostFunction<2, 3, 2, 4>, nullptr, x, z);
 
   // h(y, z) in R^4
-  problem.AddResidualBlock(new ParameterIgnoringCostFunction<3, 4, 3, 4>,
-                           nullptr,
-                           y, z);
+  problem.AddResidualBlock(
+      new ParameterIgnoringCostFunction<3, 4, 3, 4>, nullptr, y, z);
 
   // For this test, "z" is constant.
   problem.SetParameterBlockConstant(z);
@@ -493,6 +495,7 @@
   ParameterBlock* parameter_block_z = parameter_blocks->back();
   parameter_blocks->pop_back();
 
+  // clang-format off
   ExpectedEvaluation expected = {
     // Rows/columns
     9, 5,
@@ -523,6 +526,7 @@
                       0, 0,    3, 6, 9
     }
   };
+  // clang-format on
   CheckAllEvaluationCombinations(expected);
 
   // Restore parameter block z, so it will get freed in a consistent way.
diff --git a/internal/ceres/evaluator_test_utils.cc b/internal/ceres/evaluator_test_utils.cc
index 36dc21c..25801db 100644
--- a/internal/ceres/evaluator_test_utils.cc
+++ b/internal/ceres/evaluator_test_utils.cc
@@ -30,6 +30,7 @@
 //         sameeragarwal@google.com (Sameer Agarwal)
 
 #include "ceres/evaluator_test_utils.h"
+
 #include "ceres/internal/eigen.h"
 #include "gtest/gtest.h"
 
@@ -51,37 +52,39 @@
   if (expected_residuals != NULL) {
     ConstVectorRef expected_residuals_vector(expected_residuals,
                                              expected_num_rows);
-    ConstVectorRef actual_residuals_vector(actual_residuals,
-                                           expected_num_rows);
-    EXPECT_TRUE((actual_residuals_vector.array() ==
-                 expected_residuals_vector.array()).all())
-        << "Actual:\n" << actual_residuals_vector
-        << "\nExpected:\n" << expected_residuals_vector;
+    ConstVectorRef actual_residuals_vector(actual_residuals, expected_num_rows);
+    EXPECT_TRUE(
+        (actual_residuals_vector.array() == expected_residuals_vector.array())
+            .all())
+        << "Actual:\n"
+        << actual_residuals_vector << "\nExpected:\n"
+        << expected_residuals_vector;
   }
 
   if (expected_gradient != NULL) {
     ConstVectorRef expected_gradient_vector(expected_gradient,
                                             expected_num_cols);
-    ConstVectorRef actual_gradient_vector(actual_gradient,
-                                            expected_num_cols);
+    ConstVectorRef actual_gradient_vector(actual_gradient, expected_num_cols);
 
-    EXPECT_TRUE((actual_gradient_vector.array() ==
-                 expected_gradient_vector.array()).all())
-        << "Actual:\n" << actual_gradient_vector.transpose()
-        << "\nExpected:\n" << expected_gradient_vector.transpose();
+    EXPECT_TRUE(
+        (actual_gradient_vector.array() == expected_gradient_vector.array())
+            .all())
+        << "Actual:\n"
+        << actual_gradient_vector.transpose() << "\nExpected:\n"
+        << expected_gradient_vector.transpose();
   }
 
   if (expected_jacobian != NULL) {
-    ConstMatrixRef expected_jacobian_matrix(expected_jacobian,
-                                            expected_num_rows,
-                                            expected_num_cols);
-    ConstMatrixRef actual_jacobian_matrix(actual_jacobian,
-                                          expected_num_rows,
-                                          expected_num_cols);
-    EXPECT_TRUE((actual_jacobian_matrix.array() ==
-                 expected_jacobian_matrix.array()).all())
-        << "Actual:\n" << actual_jacobian_matrix
-        << "\nExpected:\n" << expected_jacobian_matrix;
+    ConstMatrixRef expected_jacobian_matrix(
+        expected_jacobian, expected_num_rows, expected_num_cols);
+    ConstMatrixRef actual_jacobian_matrix(
+        actual_jacobian, expected_num_rows, expected_num_cols);
+    EXPECT_TRUE(
+        (actual_jacobian_matrix.array() == expected_jacobian_matrix.array())
+            .all())
+        << "Actual:\n"
+        << actual_jacobian_matrix << "\nExpected:\n"
+        << expected_jacobian_matrix;
   }
 }
 
diff --git a/internal/ceres/file.cc b/internal/ceres/file.cc
index c95a44d..94f2135 100644
--- a/internal/ceres/file.cc
+++ b/internal/ceres/file.cc
@@ -33,6 +33,7 @@
 #include "ceres/file.h"
 
 #include <cstdio>
+
 #include "glog/logging.h"
 
 namespace ceres {
@@ -40,7 +41,7 @@
 
 using std::string;
 
-void WriteStringToFileOrDie(const string &data, const string &filename) {
+void WriteStringToFileOrDie(const string& data, const string& filename) {
   FILE* file_descriptor = fopen(filename.c_str(), "wb");
   if (!file_descriptor) {
     LOG(FATAL) << "Couldn't write to file: " << filename;
@@ -49,7 +50,7 @@
   fclose(file_descriptor);
 }
 
-void ReadFileToStringOrDie(const string &filename, string *data) {
+void ReadFileToStringOrDie(const string& filename, string* data) {
   FILE* file_descriptor = fopen(filename.c_str(), "r");
 
   if (!file_descriptor) {
@@ -63,10 +64,8 @@
 
   // Read the data.
   fseek(file_descriptor, 0L, SEEK_SET);
-  int num_read = fread(&((*data)[0]),
-                       sizeof((*data)[0]),
-                       num_bytes,
-                       file_descriptor);
+  int num_read =
+      fread(&((*data)[0]), sizeof((*data)[0]), num_bytes, file_descriptor);
   if (num_read != num_bytes) {
     LOG(FATAL) << "Couldn't read all of " << filename
                << "expected bytes: " << num_bytes * sizeof((*data)[0])
@@ -77,9 +76,9 @@
 
 string JoinPath(const string& dirname, const string& basename) {
 #ifdef _WIN32
-    static const char separator = '\\';
+  static const char separator = '\\';
 #else
-    static const char separator = '/';
+  static const char separator = '/';
 #endif  // _WIN32
 
   if ((!basename.empty() && basename[0] == separator) || dirname.empty()) {
diff --git a/internal/ceres/file.h b/internal/ceres/file.h
index 219b459..30c0225 100644
--- a/internal/ceres/file.h
+++ b/internal/ceres/file.h
@@ -34,14 +34,15 @@
 #define CERES_INTERNAL_FILE_H_
 
 #include <string>
+
 #include "ceres/internal/port.h"
 
 namespace ceres {
 namespace internal {
 
-void WriteStringToFileOrDie(const std::string &data,
-                            const std::string &filename);
-void ReadFileToStringOrDie(const std::string &filename, std::string *data);
+void WriteStringToFileOrDie(const std::string& data,
+                            const std::string& filename);
+void ReadFileToStringOrDie(const std::string& filename, std::string* data);
 
 // Join two path components, adding a slash if necessary.  If basename is an
 // absolute path then JoinPath ignores dirname and simply returns basename.
diff --git a/internal/ceres/fixed_array_test.cc b/internal/ceres/fixed_array_test.cc
index 95cba7f..d418786 100644
--- a/internal/ceres/fixed_array_test.cc
+++ b/internal/ceres/fixed_array_test.cc
@@ -15,6 +15,7 @@
 #include "ceres/internal/fixed_array.h"
 
 #include <stdio.h>
+
 #include <cstring>
 #include <list>
 #include <memory>
diff --git a/internal/ceres/float_cxsparse.h b/internal/ceres/float_cxsparse.h
index 57fc5e4..9a274c2 100644
--- a/internal/ceres/float_cxsparse.h
+++ b/internal/ceres/float_cxsparse.h
@@ -37,6 +37,7 @@
 #if !defined(CERES_NO_CXSPARSE)
 
 #include <memory>
+
 #include "ceres/sparse_cholesky.h"
 
 namespace ceres {
@@ -46,8 +47,7 @@
 // CXSparse.
 class FloatCXSparseCholesky : public SparseCholesky {
  public:
-  static std::unique_ptr<SparseCholesky> Create(
-      OrderingType ordering_type);
+  static std::unique_ptr<SparseCholesky> Create(OrderingType ordering_type);
 };
 
 }  // namespace internal
diff --git a/internal/ceres/float_suitesparse.h b/internal/ceres/float_suitesparse.h
index ac4d409..c436da4 100644
--- a/internal/ceres/float_suitesparse.h
+++ b/internal/ceres/float_suitesparse.h
@@ -32,9 +32,12 @@
 #define CERES_INTERNAL_FLOAT_SUITESPARSE_H_
 
 // This include must come before any #ifndef check on Ceres compile options.
+// clang-format off
 #include "ceres/internal/port.h"
+// clang-format on
 
 #include <memory>
+
 #include "ceres/sparse_cholesky.h"
 
 #if !defined(CERES_NO_SUITESPARSE)
@@ -46,8 +49,7 @@
 // SuiteSparse.
 class FloatSuiteSparseCholesky : public SparseCholesky {
  public:
-  static std::unique_ptr<SparseCholesky> Create(
-      OrderingType ordering_type);
+  static std::unique_ptr<SparseCholesky> Create(OrderingType ordering_type);
 };
 
 }  // namespace internal
diff --git a/internal/ceres/function_sample.cc b/internal/ceres/function_sample.cc
index 2fd3dbd..3e0ae60 100644
--- a/internal/ceres/function_sample.cc
+++ b/internal/ceres/function_sample.cc
@@ -29,6 +29,7 @@
 // Author: sameeragarwal@google.com (Sameer Agarwal)
 
 #include "ceres/function_sample.h"
+
 #include "ceres/stringprintf.h"
 
 namespace ceres {
@@ -64,9 +65,14 @@
       gradient_is_valid(true) {}
 
 std::string FunctionSample::ToDebugString() const {
-  return StringPrintf("[x: %.8e, value: %.8e, gradient: %.8e, "
-                      "value_is_valid: %d, gradient_is_valid: %d]",
-                      x, value, gradient, value_is_valid, gradient_is_valid);
+  return StringPrintf(
+      "[x: %.8e, value: %.8e, gradient: %.8e, "
+      "value_is_valid: %d, gradient_is_valid: %d]",
+      x,
+      value,
+      gradient,
+      value_is_valid,
+      gradient_is_valid);
 }
 
 }  // namespace internal
diff --git a/internal/ceres/function_sample.h b/internal/ceres/function_sample.h
index df79aef..8889f76 100644
--- a/internal/ceres/function_sample.h
+++ b/internal/ceres/function_sample.h
@@ -32,6 +32,7 @@
 #define CERES_INTERNAL_FUNCTION_SAMPLE_H_
 
 #include <string>
+
 #include "ceres/internal/eigen.h"
 
 namespace ceres {
@@ -85,9 +86,6 @@
   bool gradient_is_valid;
 };
 
-
-
-
 }  // namespace internal
 }  // namespace ceres
 
diff --git a/internal/ceres/generated/.clang-format b/internal/ceres/generated/.clang-format
new file mode 100644
index 0000000..9d15924
--- /dev/null
+++ b/internal/ceres/generated/.clang-format
@@ -0,0 +1,2 @@
+DisableFormat: true
+SortIncludes: false
diff --git a/internal/ceres/generated_bundle_adjustment_tests/.clang-format b/internal/ceres/generated_bundle_adjustment_tests/.clang-format
new file mode 100644
index 0000000..9d15924
--- /dev/null
+++ b/internal/ceres/generated_bundle_adjustment_tests/.clang-format
@@ -0,0 +1,2 @@
+DisableFormat: true
+SortIncludes: false
diff --git a/internal/ceres/gmock/.clang-format b/internal/ceres/gmock/.clang-format
new file mode 100644
index 0000000..9d15924
--- /dev/null
+++ b/internal/ceres/gmock/.clang-format
@@ -0,0 +1,2 @@
+DisableFormat: true
+SortIncludes: false
diff --git a/internal/ceres/gradient_checker.cc b/internal/ceres/gradient_checker.cc
index ef56666..dadaaa0 100644
--- a/internal/ceres/gradient_checker.cc
+++ b/internal/ceres/gradient_checker.cc
@@ -56,7 +56,7 @@
 // the local space of the respective local parameterizations.
 bool EvaluateCostFunction(
     const ceres::CostFunction* function,
-    double const* const * parameters,
+    double const* const* parameters,
     const std::vector<const ceres::LocalParameterization*>&
         local_parameterizations,
     Vector* residuals,
@@ -95,8 +95,8 @@
   CHECK_NE(0, function->num_residuals());
   residuals->resize(function->num_residuals());
   residuals->setZero();
-  if (!function->Evaluate(parameters, residuals->data(),
-                          jacobian_data.data())) {
+  if (!function->Evaluate(
+          parameters, residuals->data(), jacobian_data.data())) {
     return false;
   }
 
@@ -109,20 +109,20 @@
       int local_size = local_parameterizations.at(i)->LocalSize();
       CHECK_EQ(jacobians->at(i).cols(), global_size);
       Matrix global_J_local(global_size, local_size);
-      local_parameterizations.at(i)->ComputeJacobian(
-          parameters[i], global_J_local.data());
+      local_parameterizations.at(i)->ComputeJacobian(parameters[i],
+                                                     global_J_local.data());
       local_jacobians->at(i).noalias() = jacobians->at(i) * global_J_local;
     }
   }
   return true;
 }
-} // namespace
+}  // namespace
 
 GradientChecker::GradientChecker(
-      const CostFunction* function,
-      const vector<const LocalParameterization*>* local_parameterizations,
-      const NumericDiffOptions& options) :
-        function_(function) {
+    const CostFunction* function,
+    const vector<const LocalParameterization*>* local_parameterizations,
+    const NumericDiffOptions& options)
+    : function_(function) {
   CHECK(function != nullptr);
   if (local_parameterizations != NULL) {
     local_parameterizations_ = *local_parameterizations;
@@ -132,8 +132,8 @@
   }
   DynamicNumericDiffCostFunction<CostFunction, RIDDERS>*
       finite_diff_cost_function =
-      new DynamicNumericDiffCostFunction<CostFunction, RIDDERS>(
-          function, DO_NOT_TAKE_OWNERSHIP, options);
+          new DynamicNumericDiffCostFunction<CostFunction, RIDDERS>(
+              function, DO_NOT_TAKE_OWNERSHIP, options);
   finite_diff_cost_function_.reset(finite_diff_cost_function);
 
   const vector<int32_t>& parameter_block_sizes =
@@ -145,7 +145,7 @@
   finite_diff_cost_function->SetNumResiduals(function->num_residuals());
 }
 
-bool GradientChecker::Probe(double const* const * parameters,
+bool GradientChecker::Probe(double const* const* parameters,
                             double relative_precision,
                             ProbeResults* results_param) const {
   int num_residuals = function_->num_residuals();
@@ -171,8 +171,12 @@
   // Evaluate the derivative using the user supplied code.
   vector<Matrix>& jacobians = results->jacobians;
   vector<Matrix>& local_jacobians = results->local_jacobians;
-  if (!EvaluateCostFunction(function_, parameters, local_parameterizations_,
-                       &results->residuals, &jacobians, &local_jacobians)) {
+  if (!EvaluateCostFunction(function_,
+                            parameters,
+                            local_parameterizations_,
+                            &results->residuals,
+                            &jacobians,
+                            &local_jacobians)) {
     results->error_log = "Function evaluation with Jacobians failed.";
     results->return_value = false;
   }
@@ -181,10 +185,14 @@
   vector<Matrix>& numeric_jacobians = results->numeric_jacobians;
   vector<Matrix>& local_numeric_jacobians = results->local_numeric_jacobians;
   Vector finite_diff_residuals;
-  if (!EvaluateCostFunction(finite_diff_cost_function_.get(), parameters,
-                            local_parameterizations_, &finite_diff_residuals,
-                            &numeric_jacobians, &local_numeric_jacobians)) {
-    results->error_log += "\nFunction evaluation with numerical "
+  if (!EvaluateCostFunction(finite_diff_cost_function_.get(),
+                            parameters,
+                            local_parameterizations_,
+                            &finite_diff_residuals,
+                            &numeric_jacobians,
+                            &local_numeric_jacobians)) {
+    results->error_log +=
+        "\nFunction evaluation with numerical "
         "differentiation failed.";
     results->return_value = false;
   }
@@ -194,13 +202,13 @@
   }
 
   for (int i = 0; i < num_residuals; ++i) {
-    if (!IsClose(
-        results->residuals[i],
-        finite_diff_residuals[i],
-        relative_precision,
-        NULL,
-        NULL)) {
-      results->error_log = "Function evaluation with and without Jacobians "
+    if (!IsClose(results->residuals[i],
+                 finite_diff_residuals[i],
+                 relative_precision,
+                 NULL,
+                 NULL)) {
+      results->error_log =
+          "Function evaluation with and without Jacobians "
           "resulted in different residuals.";
       LOG(INFO) << results->residuals.transpose();
       LOG(INFO) << finite_diff_residuals.transpose();
@@ -219,7 +227,7 @@
   for (int k = 0; k < function_->parameter_block_sizes().size(); k++) {
     StringAppendF(&error_log,
                   "========== "
-                  "Jacobian for " "block %d: (%ld by %ld)) "
+                  "Jacobian for block %d: (%ld by %ld)) "
                   "==========\n",
                   k,
                   static_cast<long>(local_jacobians[k].rows()),
@@ -234,28 +242,33 @@
         double term_jacobian = local_jacobians[k](i, j);
         double finite_jacobian = local_numeric_jacobians[k](i, j);
         double relative_error, absolute_error;
-        bool bad_jacobian_entry =
-            !IsClose(term_jacobian,
-                     finite_jacobian,
-                     relative_precision,
-                     &relative_error,
-                     &absolute_error);
+        bool bad_jacobian_entry = !IsClose(term_jacobian,
+                                           finite_jacobian,
+                                           relative_precision,
+                                           &relative_error,
+                                           &absolute_error);
         worst_relative_error = std::max(worst_relative_error, relative_error);
 
         StringAppendF(&error_log,
                       "%6d %4d %4d %17g %17g %17g %17g %17g %17g",
-                      k, i, j,
-                      term_jacobian, finite_jacobian,
-                      absolute_error, relative_error,
+                      k,
+                      i,
+                      j,
+                      term_jacobian,
+                      finite_jacobian,
+                      absolute_error,
+                      relative_error,
                       parameters[k][j],
                       results->residuals[i]);
 
         if (bad_jacobian_entry) {
           num_bad_jacobian_components++;
-          StringAppendF(
-              &error_log,
-              " ------ (%d,%d,%d) Relative error worse than %g",
-              k, i, j, relative_precision);
+          StringAppendF(&error_log,
+                        " ------ (%d,%d,%d) Relative error worse than %g",
+                        k,
+                        i,
+                        j,
+                        relative_precision);
         }
         error_log += "\n";
       }
@@ -264,11 +277,12 @@
 
   // Since there were some bad errors, dump comprehensive debug info.
   if (num_bad_jacobian_components) {
-    string header = StringPrintf("\nDetected %d bad Jacobian component(s). "
+    string header = StringPrintf(
+        "\nDetected %d bad Jacobian component(s). "
         "Worst relative error was %g.\n",
         num_bad_jacobian_components,
         worst_relative_error);
-     results->error_log = header + "\n" + error_log;
+    results->error_log = header + "\n" + error_log;
     return false;
   }
   return true;
diff --git a/internal/ceres/gradient_checker_test.cc b/internal/ceres/gradient_checker_test.cc
index b2dd35e..31dc97b 100644
--- a/internal/ceres/gradient_checker_test.cc
+++ b/internal/ceres/gradient_checker_test.cc
@@ -169,8 +169,6 @@
   vector<vector<double>> a_;  // our vectors.
 };
 
-
-
 static void CheckDimensions(const GradientChecker::ProbeResults& results,
                             const std::vector<int>& parameter_sizes,
                             const std::vector<int>& local_parameter_sizes,
diff --git a/internal/ceres/gradient_checking_cost_function.cc b/internal/ceres/gradient_checking_cost_function.cc
index 13d6c58..2eb6d62 100644
--- a/internal/ceres/gradient_checking_cost_function.cc
+++ b/internal/ceres/gradient_checking_cost_function.cc
@@ -38,6 +38,7 @@
 #include <string>
 #include <vector>
 
+#include "ceres/dynamic_numeric_diff_cost_function.h"
 #include "ceres/gradient_checker.h"
 #include "ceres/internal/eigen.h"
 #include "ceres/parameter_block.h"
@@ -45,7 +46,6 @@
 #include "ceres/problem_impl.h"
 #include "ceres/program.h"
 #include "ceres/residual_block.h"
-#include "ceres/dynamic_numeric_diff_cost_function.h"
 #include "ceres/stringprintf.h"
 #include "ceres/types.h"
 #include "glog/logging.h"
@@ -81,7 +81,7 @@
     set_num_residuals(function->num_residuals());
   }
 
-  virtual ~GradientCheckingCostFunction() { }
+  virtual ~GradientCheckingCostFunction() {}
 
   bool Evaluate(double const* const* parameters,
                 double* residuals,
@@ -92,9 +92,8 @@
     }
 
     GradientChecker::ProbeResults results;
-    bool okay = gradient_checker_.Probe(parameters,
-                                        relative_precision_,
-                                        &results);
+    bool okay =
+        gradient_checker_.Probe(parameters, relative_precision_, &results);
 
     // If the cost function returned false, there's nothing we can say about
     // the gradients.
@@ -117,8 +116,9 @@
     }
 
     if (!okay) {
-      std::string error_log = "Gradient Error detected!\nExtra info for "
-          "this residual: " + extra_info_ + "\n" + results.error_log;
+      std::string error_log =
+          "Gradient Error detected!\nExtra info for this residual: " +
+          extra_info_ + "\n" + results.error_log;
       callback_->SetGradientErrorDetected(error_log);
     }
     return true;
@@ -135,13 +135,12 @@
 }  // namespace
 
 GradientCheckingIterationCallback::GradientCheckingIterationCallback()
-    : gradient_error_detected_(false) {
-}
+    : gradient_error_detected_(false) {}
 
 CallbackReturnType GradientCheckingIterationCallback::operator()(
     const IterationSummary& summary) {
   if (gradient_error_detected_) {
-    LOG(ERROR)<< "Gradient error detected. Terminating solver.";
+    LOG(ERROR) << "Gradient error detected. Terminating solver.";
     return SOLVER_ABORT;
   }
   return SOLVER_CONTINUE;
@@ -166,7 +165,8 @@
   return new GradientCheckingCostFunction(cost_function,
                                           local_parameterizations,
                                           numeric_diff_options,
-                                          relative_precision, extra_info,
+                                          relative_precision,
+                                          extra_info,
                                           callback);
 }
 
@@ -193,8 +193,8 @@
   NumericDiffOptions numeric_diff_options;
   numeric_diff_options.relative_step_size = relative_step_size;
 
-  ProblemImpl* gradient_checking_problem_impl = new ProblemImpl(
-      gradient_checking_problem_options);
+  ProblemImpl* gradient_checking_problem_impl =
+      new ProblemImpl(gradient_checking_problem_options);
 
   Program* program = problem_impl->mutable_program();
 
@@ -213,7 +213,7 @@
           parameter_block->mutable_user_state());
     }
 
-    for (int i = 0; i <  parameter_block->Size(); ++i) {
+    for (int i = 0; i < parameter_block->Size(); ++i) {
       gradient_checking_problem_impl->SetParameterUpperBound(
           parameter_block->mutable_user_state(),
           i,
@@ -235,8 +235,8 @@
     // Build a human readable string which identifies the
     // ResidualBlock. This is used by the GradientCheckingCostFunction
     // when logging debugging information.
-    string extra_info = StringPrintf(
-        "Residual block id %d; depends on parameters [", i);
+    string extra_info =
+        StringPrintf("Residual block id %d; depends on parameters [", i);
     vector<double*> parameter_blocks;
     vector<const LocalParameterization*> local_parameterizations;
     parameter_blocks.reserve(residual_block->NumParameterBlocks());
@@ -277,13 +277,11 @@
   // depend on this being the case, so we explicitly call
   // SetParameterBlockStatePtrsToUserStatePtrs to ensure that this is
   // the case.
-  gradient_checking_problem_impl
-      ->mutable_program()
+  gradient_checking_problem_impl->mutable_program()
       ->SetParameterBlockStatePtrsToUserStatePtrs();
 
   return gradient_checking_problem_impl;
 }
 
-
 }  // namespace internal
 }  // namespace ceres
diff --git a/internal/ceres/gradient_checking_cost_function.h b/internal/ceres/gradient_checking_cost_function.h
index e9a34f7..ab6e9f8 100644
--- a/internal/ceres/gradient_checking_cost_function.h
+++ b/internal/ceres/gradient_checking_cost_function.h
@@ -60,6 +60,7 @@
   // Retrieve error status (not thread safe).
   bool gradient_error_detected() const { return gradient_error_detected_; }
   const std::string& error_log() const { return error_log_; }
+
  private:
   bool gradient_error_detected_;
   std::string error_log_;
diff --git a/internal/ceres/gradient_checking_cost_function_test.cc b/internal/ceres/gradient_checking_cost_function_test.cc
index ff211ed..9ca51f8 100644
--- a/internal/ceres/gradient_checking_cost_function_test.cc
+++ b/internal/ceres/gradient_checking_cost_function_test.cc
@@ -53,10 +53,10 @@
 namespace internal {
 
 using std::vector;
+using testing::_;
 using testing::AllOf;
 using testing::AnyNumber;
 using testing::HasSubstr;
-using testing::_;
 
 // Pick a (non-quadratic) function whose derivative are easy:
 //
@@ -65,12 +65,12 @@
 //
 // where 'a' is a vector of the same size as 'x'. In the block
 // version, they are both block vectors, of course.
-template<int bad_block = 1, int bad_variable = 2>
+template <int bad_block = 1, int bad_variable = 2>
 class TestTerm : public CostFunction {
  public:
   // The constructor of this function needs to know the number
   // of blocks desired, and the size of each block.
-  TestTerm(int arity, int const *dim) : arity_(arity) {
+  TestTerm(int arity, int const* dim) : arity_(arity) {
     // Make 'arity' random vectors.
     a_.resize(arity_);
     for (int j = 0; j < arity_; ++j) {
@@ -107,7 +107,7 @@
         if (jacobians[j]) {
           for (int u = 0; u < parameter_block_sizes()[j]; ++u) {
             // See comments before class.
-            jacobians[j][u] = - f * a_[j][u];
+            jacobians[j][u] = -f * a_[j][u];
 
             if (bad_block == j && bad_variable == u) {
               // Whoopsiedoopsie! Deliberately introduce a faulty jacobian entry
@@ -135,7 +135,7 @@
 
   // Test with 3 blocks of size 2, 3 and 4.
   int const arity = 3;
-  int const dim[arity] = { 2, 3, 4 };
+  int const dim[arity] = {2, 3, 4};
 
   // Make a random set of blocks.
   vector<double*> parameters(arity);
@@ -164,17 +164,16 @@
   TestTerm<-1, -1> term(arity, dim);
   GradientCheckingIterationCallback callback;
   std::unique_ptr<CostFunction> gradient_checking_cost_function(
-      CreateGradientCheckingCostFunction(&term, NULL,
+      CreateGradientCheckingCostFunction(&term,
+                                         NULL,
                                          kRelativeStepSize,
                                          kRelativePrecision,
-                                         "Ignored.", &callback));
-  term.Evaluate(&parameters[0],
-                &original_residual,
-                &original_jacobians[0]);
+                                         "Ignored.",
+                                         &callback));
+  term.Evaluate(&parameters[0], &original_residual, &original_jacobians[0]);
 
-  gradient_checking_cost_function->Evaluate(&parameters[0],
-                                            &residual,
-                                            &jacobians[0]);
+  gradient_checking_cost_function->Evaluate(
+      &parameters[0], &residual, &jacobians[0]);
   EXPECT_EQ(original_residual, residual);
 
   for (int j = 0; j < arity; j++) {
@@ -193,7 +192,7 @@
 
   // Test with 3 blocks of size 2, 3 and 4.
   int const arity = 3;
-  int const dim[arity] = { 2, 3, 4 };
+  int const dim[arity] = {2, 3, 4};
 
   // Make a random set of blocks.
   vector<double*> parameters(arity);
@@ -221,17 +220,18 @@
     TestTerm<1, 2> term(arity, dim);
     GradientCheckingIterationCallback callback;
     std::unique_ptr<CostFunction> gradient_checking_cost_function(
-        CreateGradientCheckingCostFunction(&term, NULL,
+        CreateGradientCheckingCostFunction(&term,
+                                           NULL,
                                            kRelativeStepSize,
                                            kRelativePrecision,
-                                           "Fuzzy banana", &callback));
-    EXPECT_TRUE(
-        gradient_checking_cost_function->Evaluate(&parameters[0], &residual,
-                                                  &jacobians[0]));
+                                           "Fuzzy banana",
+                                           &callback));
+    EXPECT_TRUE(gradient_checking_cost_function->Evaluate(
+        &parameters[0], &residual, &jacobians[0]));
     EXPECT_TRUE(callback.gradient_error_detected());
     EXPECT_TRUE(callback.error_log().find("Fuzzy banana") != std::string::npos);
-    EXPECT_TRUE(callback.error_log().find("(1,0,2) Relative error worse than")
-                != std::string::npos);
+    EXPECT_TRUE(callback.error_log().find(
+                    "(1,0,2) Relative error worse than") != std::string::npos);
   }
 
   // The gradient is correct, so no errors are reported.
@@ -240,13 +240,14 @@
     TestTerm<-1, -1> term(arity, dim);
     GradientCheckingIterationCallback callback;
     std::unique_ptr<CostFunction> gradient_checking_cost_function(
-        CreateGradientCheckingCostFunction(&term, NULL,
+        CreateGradientCheckingCostFunction(&term,
+                                           NULL,
                                            kRelativeStepSize,
                                            kRelativePrecision,
-                                           "Fuzzy banana", &callback));
-    EXPECT_TRUE(
-        gradient_checking_cost_function->Evaluate(&parameters[0], &residual,
-                                                  &jacobians[0]));
+                                           "Fuzzy banana",
+                                           &callback));
+    EXPECT_TRUE(gradient_checking_cost_function->Evaluate(
+        &parameters[0], &residual, &jacobians[0]));
     EXPECT_FALSE(callback.gradient_error_detected());
   }
 
@@ -279,7 +280,7 @@
 };
 
 // Trivial cost function that accepts two arguments.
-class BinaryCostFunction: public CostFunction {
+class BinaryCostFunction : public CostFunction {
  public:
   BinaryCostFunction(int num_residuals,
                      int32_t parameter_block1_size,
@@ -300,7 +301,7 @@
 };
 
 // Trivial cost function that accepts three arguments.
-class TernaryCostFunction: public CostFunction {
+class TernaryCostFunction : public CostFunction {
  public:
   TernaryCostFunction(int num_residuals,
                       int32_t parameter_block1_size,
@@ -324,7 +325,7 @@
 
 // Verify that the two ParameterBlocks are formed from the same user
 // array and have the same LocalParameterization object.
-static void ParameterBlocksAreEquivalent(const ParameterBlock*  left,
+static void ParameterBlocksAreEquivalent(const ParameterBlock* left,
                                          const ParameterBlock* right) {
   CHECK(left != nullptr);
   CHECK(right != nullptr);
@@ -349,8 +350,10 @@
   problem_impl.SetParameterBlockConstant(y);
   problem_impl.AddParameterBlock(z, 5);
   problem_impl.AddParameterBlock(w, 4, new QuaternionParameterization);
-  problem_impl.AddResidualBlock(new UnaryCostFunction(2, 3), NULL, x);
-  problem_impl.AddResidualBlock(new BinaryCostFunction(6, 5, 4) ,
+  // clang-format off
+  problem_impl.AddResidualBlock(new UnaryCostFunction(2, 3),
+                                NULL, x);
+  problem_impl.AddResidualBlock(new BinaryCostFunction(6, 5, 4),
                                 NULL, z, y);
   problem_impl.AddResidualBlock(new BinaryCostFunction(3, 3, 5),
                                 new TrivialLoss, x, z);
@@ -358,6 +361,7 @@
                                 NULL, z, x);
   problem_impl.AddResidualBlock(new TernaryCostFunction(1, 5, 3, 4),
                                 NULL, z, x, y);
+  // clang-format on
 
   GradientCheckingIterationCallback callback;
   std::unique_ptr<ProblemImpl> gradient_checking_problem_impl(
@@ -392,8 +396,7 @@
 
   for (int i = 0; i < program.residual_blocks().size(); ++i) {
     // Compare the sizes of the two ResidualBlocks.
-    const ResidualBlock* original_residual_block =
-        program.residual_blocks()[i];
+    const ResidualBlock* original_residual_block = program.residual_blocks()[i];
     const ResidualBlock* new_residual_block =
         gradient_checking_program.residual_blocks()[i];
     EXPECT_EQ(original_residual_block->NumParameterBlocks(),
@@ -412,15 +415,14 @@
   }
 }
 
-
 TEST(GradientCheckingProblemImpl, ConstrainedProblemBoundsArePropagated) {
   // Parameter blocks with arbitrarily chosen initial values.
   double x[] = {1.0, 2.0, 3.0};
   ProblemImpl problem_impl;
   problem_impl.AddParameterBlock(x, 3);
   problem_impl.AddResidualBlock(new UnaryCostFunction(2, 3), NULL, x);
-  problem_impl.SetParameterLowerBound(x,0,0.9);
-  problem_impl.SetParameterUpperBound(x,1,2.5);
+  problem_impl.SetParameterLowerBound(x, 0, 0.9);
+  problem_impl.SetParameterUpperBound(x, 1, 2.5);
 
   GradientCheckingIterationCallback callback;
   std::unique_ptr<ProblemImpl> gradient_checking_problem_impl(
diff --git a/internal/ceres/gradient_problem.cc b/internal/ceres/gradient_problem.cc
index 4ebd3e6..ba33fbc 100644
--- a/internal/ceres/gradient_problem.cc
+++ b/internal/ceres/gradient_problem.cc
@@ -29,6 +29,7 @@
 // Author: sameeragarwal@google.com (Sameer Agarwal)
 
 #include "ceres/gradient_problem.h"
+
 #include "ceres/local_parameterization.h"
 #include "glog/logging.h"
 
@@ -38,14 +39,13 @@
     : function_(function),
       parameterization_(
           new IdentityParameterization(function_->NumParameters())),
-      scratch_(new double[function_->NumParameters()]) {
-}
+      scratch_(new double[function_->NumParameters()]) {}
 
 GradientProblem::GradientProblem(FirstOrderFunction* function,
                                  LocalParameterization* parameterization)
-      : function_(function),
-        parameterization_(parameterization),
-        scratch_(new double[function_->NumParameters()]) {
+    : function_(function),
+      parameterization_(parameterization),
+      scratch_(new double[function_->NumParameters()]) {
   CHECK_EQ(function_->NumParameters(), parameterization_->GlobalSize());
 }
 
@@ -57,7 +57,6 @@
   return parameterization_->LocalSize();
 }
 
-
 bool GradientProblem::Evaluate(const double* parameters,
                                double* cost,
                                double* gradient) const {
@@ -66,10 +65,8 @@
   }
 
   return (function_->Evaluate(parameters, cost, scratch_.get()) &&
-          parameterization_->MultiplyByJacobian(parameters,
-                                                1,
-                                                scratch_.get(),
-                                                gradient));
+          parameterization_->MultiplyByJacobian(
+              parameters, 1, scratch_.get(), gradient));
 }
 
 bool GradientProblem::Plus(const double* x,
diff --git a/internal/ceres/gradient_problem_evaluator.h b/internal/ceres/gradient_problem_evaluator.h
index c5ad1d7..d224dbe 100644
--- a/internal/ceres/gradient_problem_evaluator.h
+++ b/internal/ceres/gradient_problem_evaluator.h
@@ -76,9 +76,7 @@
     return problem_.Plus(state, delta, state_plus_delta);
   }
 
-  int NumParameters() const final {
-    return problem_.NumParameters();
-  }
+  int NumParameters() const final { return problem_.NumParameters(); }
 
   int NumEffectiveParameters() const final {
     return problem_.NumLocalParameters();
diff --git a/internal/ceres/gradient_problem_solver.cc b/internal/ceres/gradient_problem_solver.cc
index 1639e30..b72fad9 100644
--- a/internal/ceres/gradient_problem_solver.cc
+++ b/internal/ceres/gradient_problem_solver.cc
@@ -31,6 +31,7 @@
 #include "ceres/gradient_problem_solver.h"
 
 #include <memory>
+
 #include "ceres/callbacks.h"
 #include "ceres/gradient_problem.h"
 #include "ceres/gradient_problem_evaluator.h"
@@ -45,8 +46,8 @@
 #include "ceres/wall_time.h"
 
 namespace ceres {
-using internal::StringPrintf;
 using internal::StringAppendF;
+using internal::StringPrintf;
 using std::string;
 
 namespace {
@@ -83,7 +84,6 @@
 #undef COPY_OPTION
 }
 
-
 }  // namespace
 
 bool GradientProblemSolver::Options::IsValid(std::string* error) const {
@@ -92,8 +92,7 @@
   return solver_options.IsValid(error);
 }
 
-GradientProblemSolver::~GradientProblemSolver() {
-}
+GradientProblemSolver::~GradientProblemSolver() {}
 
 void GradientProblemSolver::Solve(const GradientProblemSolver::Options& options,
                                   const GradientProblem& problem,
@@ -111,6 +110,7 @@
 
   CHECK(summary != nullptr);
   *summary = Summary();
+  // clang-format off
   summary->num_parameters                    = problem.NumParameters();
   summary->num_local_parameters              = problem.NumLocalParameters();
   summary->line_search_direction_type        = options.line_search_direction_type;         //  NOLINT
@@ -118,6 +118,7 @@
   summary->line_search_type                  = options.line_search_type;
   summary->max_lbfgs_rank                    = options.max_lbfgs_rank;
   summary->nonlinear_conjugate_gradient_type = options.nonlinear_conjugate_gradient_type;  //  NOLINT
+  // clang-format on
 
   // Check validity
   if (!options.IsValid(&summary->message)) {
@@ -163,11 +164,13 @@
 
   minimizer->Minimize(minimizer_options, solution.data(), &solver_summary);
 
+  // clang-format off
   summary->termination_type = solver_summary.termination_type;
   summary->message          = solver_summary.message;
   summary->initial_cost     = solver_summary.initial_cost;
   summary->final_cost       = solver_summary.final_cost;
   summary->iterations       = solver_summary.iterations;
+  // clang-format on
   summary->line_search_polynomial_minimization_time_in_seconds =
       solver_summary.line_search_polynomial_minimization_time_in_seconds;
 
@@ -200,15 +203,16 @@
 }
 
 string GradientProblemSolver::Summary::BriefReport() const {
-  return StringPrintf("Ceres GradientProblemSolver Report: "
-                      "Iterations: %d, "
-                      "Initial cost: %e, "
-                      "Final cost: %e, "
-                      "Termination: %s",
-                      static_cast<int>(iterations.size()),
-                      initial_cost,
-                      final_cost,
-                      TerminationTypeToString(termination_type));
+  return StringPrintf(
+      "Ceres GradientProblemSolver Report: "
+      "Iterations: %d, "
+      "Initial cost: %e, "
+      "Final cost: %e, "
+      "Termination: %s",
+      static_cast<int>(iterations.size()),
+      initial_cost,
+      final_cost,
+      TerminationTypeToString(termination_type));
 }
 
 string GradientProblemSolver::Summary::FullReport() const {
@@ -218,60 +222,63 @@
 
   StringAppendF(&report, "Parameters          % 25d\n", num_parameters);
   if (num_local_parameters != num_parameters) {
-    StringAppendF(&report, "Local parameters    % 25d\n",
-                  num_local_parameters);
+    StringAppendF(&report, "Local parameters    % 25d\n", num_local_parameters);
   }
 
   string line_search_direction_string;
   if (line_search_direction_type == LBFGS) {
     line_search_direction_string = StringPrintf("LBFGS (%d)", max_lbfgs_rank);
   } else if (line_search_direction_type == NONLINEAR_CONJUGATE_GRADIENT) {
-    line_search_direction_string =
-        NonlinearConjugateGradientTypeToString(
-            nonlinear_conjugate_gradient_type);
+    line_search_direction_string = NonlinearConjugateGradientTypeToString(
+        nonlinear_conjugate_gradient_type);
   } else {
     line_search_direction_string =
         LineSearchDirectionTypeToString(line_search_direction_type);
   }
 
-  StringAppendF(&report, "Line search direction     %19s\n",
+  StringAppendF(&report,
+                "Line search direction     %19s\n",
                 line_search_direction_string.c_str());
 
-  const string line_search_type_string =
-      StringPrintf("%s %s",
-                   LineSearchInterpolationTypeToString(
-                       line_search_interpolation_type),
-                   LineSearchTypeToString(line_search_type));
-  StringAppendF(&report, "Line search type          %19s\n",
+  const string line_search_type_string = StringPrintf(
+      "%s %s",
+      LineSearchInterpolationTypeToString(line_search_interpolation_type),
+      LineSearchTypeToString(line_search_type));
+  StringAppendF(&report,
+                "Line search type          %19s\n",
                 line_search_type_string.c_str());
   StringAppendF(&report, "\n");
 
   StringAppendF(&report, "\nCost:\n");
   StringAppendF(&report, "Initial        % 30e\n", initial_cost);
-  if (termination_type != FAILURE &&
-      termination_type != USER_FAILURE) {
+  if (termination_type != FAILURE && termination_type != USER_FAILURE) {
     StringAppendF(&report, "Final          % 30e\n", final_cost);
-    StringAppendF(&report, "Change         % 30e\n",
-                  initial_cost - final_cost);
+    StringAppendF(&report, "Change         % 30e\n", initial_cost - final_cost);
   }
 
-  StringAppendF(&report, "\nMinimizer iterations         % 16d\n",
+  StringAppendF(&report,
+                "\nMinimizer iterations         % 16d\n",
                 static_cast<int>(iterations.size()));
 
   StringAppendF(&report, "\nTime (in seconds):\n");
-  StringAppendF(&report, "\n  Cost evaluation     %23.6f (%d)\n",
+  StringAppendF(&report,
+                "\n  Cost evaluation     %23.6f (%d)\n",
                 cost_evaluation_time_in_seconds,
                 num_cost_evaluations);
-  StringAppendF(&report, "  Gradient & cost evaluation %16.6f (%d)\n",
+  StringAppendF(&report,
+                "  Gradient & cost evaluation %16.6f (%d)\n",
                 gradient_evaluation_time_in_seconds,
                 num_gradient_evaluations);
-  StringAppendF(&report, "  Polynomial minimization   %17.6f\n",
+  StringAppendF(&report,
+                "  Polynomial minimization   %17.6f\n",
                 line_search_polynomial_minimization_time_in_seconds);
-  StringAppendF(&report, "Total               %25.6f\n\n",
-                total_time_in_seconds);
+  StringAppendF(
+      &report, "Total               %25.6f\n\n", total_time_in_seconds);
 
-  StringAppendF(&report, "Termination:        %25s (%s)\n",
-                TerminationTypeToString(termination_type), message.c_str());
+  StringAppendF(&report,
+                "Termination:        %25s (%s)\n",
+                TerminationTypeToString(termination_type),
+                message.c_str());
   return report;
 }
 
diff --git a/internal/ceres/gradient_problem_solver_test.cc b/internal/ceres/gradient_problem_solver_test.cc
index 1a08c48..f01d206 100644
--- a/internal/ceres/gradient_problem_solver_test.cc
+++ b/internal/ceres/gradient_problem_solver_test.cc
@@ -28,9 +28,9 @@
 //
 // Author: strandmark@google.com (Petter Strandmark)
 
-#include "ceres/gradient_problem.h"
 #include "ceres/gradient_problem_solver.h"
 
+#include "ceres/gradient_problem.h"
 #include "gtest/gtest.h"
 
 namespace ceres {
@@ -89,18 +89,17 @@
 };
 
 struct RememberingCallback : public IterationCallback {
-  explicit RememberingCallback(double *x) : calls(0), x(x) {}
+  explicit RememberingCallback(double* x) : calls(0), x(x) {}
   virtual ~RememberingCallback() {}
   CallbackReturnType operator()(const IterationSummary& summary) final {
     x_values.push_back(*x);
     return SOLVER_CONTINUE;
   }
   int calls;
-  double *x;
+  double* x;
   std::vector<double> x_values;
 };
 
-
 TEST(Solver, UpdateStateEveryIterationOption) {
   double x = 50.0;
   const double original_x = x;
diff --git a/internal/ceres/gradient_problem_test.cc b/internal/ceres/gradient_problem_test.cc
index 640cf5f..8934138 100644
--- a/internal/ceres/gradient_problem_test.cc
+++ b/internal/ceres/gradient_problem_test.cc
@@ -65,9 +65,7 @@
 
 TEST(GradientProblem, TakesOwnershipOfFirstOrderFunction) {
   bool is_destructed = false;
-  {
-    ceres::GradientProblem problem(new QuadraticTestFunction(&is_destructed));
-  }
+  { ceres::GradientProblem problem(new QuadraticTestFunction(&is_destructed)); }
   EXPECT_TRUE(is_destructed);
 }
 
diff --git a/internal/ceres/graph.h b/internal/ceres/graph.h
index 4e1fd81..9b26158 100644
--- a/internal/ceres/graph.h
+++ b/internal/ceres/graph.h
@@ -32,9 +32,10 @@
 #define CERES_INTERNAL_GRAPH_H_
 
 #include <limits>
-#include <unordered_set>
 #include <unordered_map>
+#include <unordered_set>
 #include <utility>
+
 #include "ceres/map_util.h"
 #include "ceres/pair_hash.h"
 #include "ceres/types.h"
@@ -93,9 +94,7 @@
     return FindOrDie(edges_, vertex);
   }
 
-  const std::unordered_set<Vertex>& vertices() const {
-    return vertices_;
-  }
+  const std::unordered_set<Vertex>& vertices() const { return vertices_; }
 
  private:
   std::unordered_set<Vertex> vertices_;
@@ -121,9 +120,7 @@
 
   // Uses weight = 1.0. If vertex already exists, its weight is set to
   // 1.0.
-  void AddVertex(const Vertex& vertex) {
-    AddVertex(vertex, 1.0);
-  }
+  void AddVertex(const Vertex& vertex) { AddVertex(vertex, 1.0); }
 
   bool RemoveVertex(const Vertex& vertex) {
     if (vertices_.find(vertex) == vertices_.end()) {
@@ -184,11 +181,11 @@
   // the edge weight is zero.
   double EdgeWeight(const Vertex& vertex1, const Vertex& vertex2) const {
     if (vertex1 < vertex2) {
-      return FindWithDefault(edge_weights_,
-                             std::make_pair(vertex1, vertex2), 0.0);
+      return FindWithDefault(
+          edge_weights_, std::make_pair(vertex1, vertex2), 0.0);
     } else {
-      return FindWithDefault(edge_weights_,
-                             std::make_pair(vertex2, vertex1), 0.0);
+      return FindWithDefault(
+          edge_weights_, std::make_pair(vertex2, vertex1), 0.0);
     }
   }
 
@@ -198,9 +195,7 @@
     return FindOrDie(edges_, vertex);
   }
 
-  const std::unordered_set<Vertex>& vertices() const {
-    return vertices_;
-  }
+  const std::unordered_set<Vertex>& vertices() const { return vertices_; }
 
   static double InvalidWeight() {
     return std::numeric_limits<double>::quiet_NaN();
diff --git a/internal/ceres/graph_algorithms.h b/internal/ceres/graph_algorithms.h
index b062931..7d63b33 100644
--- a/internal/ceres/graph_algorithms.h
+++ b/internal/ceres/graph_algorithms.h
@@ -36,8 +36,9 @@
 #include <algorithm>
 #include <unordered_map>
 #include <unordered_set>
-#include <vector>
 #include <utility>
+#include <vector>
+
 #include "ceres/graph.h"
 #include "ceres/wall_time.h"
 #include "glog/logging.h"
@@ -50,8 +51,7 @@
 template <typename Vertex>
 class VertexTotalOrdering {
  public:
-  explicit VertexTotalOrdering(const Graph<Vertex>& graph)
-      : graph_(graph) {}
+  explicit VertexTotalOrdering(const Graph<Vertex>& graph) : graph_(graph) {}
 
   bool operator()(const Vertex& lhs, const Vertex& rhs) const {
     if (graph_.Neighbors(lhs).size() == graph_.Neighbors(rhs).size()) {
@@ -67,8 +67,7 @@
 template <typename Vertex>
 class VertexDegreeLessThan {
  public:
-  explicit VertexDegreeLessThan(const Graph<Vertex>& graph)
-      : graph_(graph) {}
+  explicit VertexDegreeLessThan(const Graph<Vertex>& graph) : graph_(graph) {}
 
   bool operator()(const Vertex& lhs, const Vertex& rhs) const {
     return graph_.Neighbors(lhs).size() < graph_.Neighbors(rhs).size();
@@ -177,8 +176,9 @@
 
   std::vector<Vertex> vertex_queue(*ordering);
 
-  std::stable_sort(vertex_queue.begin(), vertex_queue.end(),
-                  VertexDegreeLessThan<Vertex>(graph));
+  std::stable_sort(vertex_queue.begin(),
+                   vertex_queue.end(),
+                   VertexDegreeLessThan<Vertex>(graph));
 
   // Mark all vertices white.
   std::unordered_map<Vertex, char> vertex_color;
@@ -257,8 +257,8 @@
 // spanning forest, or a collection of linear paths that span the
 // graph G.
 template <typename Vertex>
-WeightedGraph<Vertex>*
-Degree2MaximumSpanningForest(const WeightedGraph<Vertex>& graph) {
+WeightedGraph<Vertex>* Degree2MaximumSpanningForest(
+    const WeightedGraph<Vertex>& graph) {
   // Array of edges sorted in decreasing order of their weights.
   std::vector<std::pair<double, std::pair<Vertex, Vertex>>> weighted_edges;
   WeightedGraph<Vertex>* forest = new WeightedGraph<Vertex>();
@@ -294,7 +294,7 @@
 
   // Greedily add edges to the spanning tree/forest as long as they do
   // not violate the degree/cycle constraint.
-  for (int i =0; i < weighted_edges.size(); ++i) {
+  for (int i = 0; i < weighted_edges.size(); ++i) {
     const std::pair<Vertex, Vertex>& edge = weighted_edges[i].second;
     const Vertex vertex1 = edge.first;
     const Vertex vertex2 = edge.second;
diff --git a/internal/ceres/graph_algorithms_test.cc b/internal/ceres/graph_algorithms_test.cc
index 2aef327..d5dd02e 100644
--- a/internal/ceres/graph_algorithms_test.cc
+++ b/internal/ceres/graph_algorithms_test.cc
@@ -111,8 +111,8 @@
   graph.AddEdge(0, 1, 0.5);
   graph.AddEdge(1, 0, 0.5);
 
-  std::unique_ptr<WeightedGraph<int> > forest(
-					      Degree2MaximumSpanningForest(graph));
+  std::unique_ptr<WeightedGraph<int>> forest(
+      Degree2MaximumSpanningForest(graph));
 
   const std::unordered_set<int>& vertices = forest->vertices();
   EXPECT_EQ(vertices.size(), 2);
@@ -135,7 +135,8 @@
   graph.AddEdge(0, 3, 3.0);
   graph.AddEdge(0, 4, 4.0);
 
-  std::unique_ptr<WeightedGraph<int> > forest(Degree2MaximumSpanningForest(graph));
+  std::unique_ptr<WeightedGraph<int>> forest(
+      Degree2MaximumSpanningForest(graph));
   const std::unordered_set<int>& vertices = forest->vertices();
   EXPECT_EQ(vertices.size(), 5);
 
@@ -200,7 +201,6 @@
   }
 }
 
-
 TEST(StableIndependentSet, BreakTies) {
   Graph<int> graph;
   graph.AddVertex(0);
diff --git a/internal/ceres/graph_test.cc b/internal/ceres/graph_test.cc
index 8f05475..2154a06 100644
--- a/internal/ceres/graph_test.cc
+++ b/internal/ceres/graph_test.cc
@@ -31,6 +31,7 @@
 #include "ceres/graph.h"
 
 #include <unordered_set>
+
 #include "gtest/gtest.h"
 
 namespace ceres {
diff --git a/internal/ceres/gtest/.clang-format b/internal/ceres/gtest/.clang-format
new file mode 100644
index 0000000..9d15924
--- /dev/null
+++ b/internal/ceres/gtest/.clang-format
@@ -0,0 +1,2 @@
+DisableFormat: true
+SortIncludes: false
diff --git a/internal/ceres/householder_vector_test.cc b/internal/ceres/householder_vector_test.cc
index 10b3e88..6f3b172 100644
--- a/internal/ceres/householder_vector_test.cc
+++ b/internal/ceres/householder_vector_test.cc
@@ -29,6 +29,7 @@
 // Author: vitus@google.com (Michael Vitus)
 
 #include "ceres/internal/householder_vector.h"
+
 #include "ceres/internal/eigen.h"
 #include "glog/logging.h"
 #include "gtest/gtest.h"
diff --git a/internal/ceres/implicit_schur_complement.cc b/internal/ceres/implicit_schur_complement.cc
index bf680d1..f2196d4 100644
--- a/internal/ceres/implicit_schur_complement.cc
+++ b/internal/ceres/implicit_schur_complement.cc
@@ -43,13 +43,9 @@
 
 ImplicitSchurComplement::ImplicitSchurComplement(
     const LinearSolver::Options& options)
-    : options_(options),
-      D_(NULL),
-      b_(NULL) {
-}
+    : options_(options), D_(NULL), b_(NULL) {}
 
-ImplicitSchurComplement::~ImplicitSchurComplement() {
-}
+ImplicitSchurComplement::~ImplicitSchurComplement() {}
 
 void ImplicitSchurComplement::Init(const BlockSparseMatrix& A,
                                    const double* D,
@@ -88,7 +84,7 @@
   // the block diagonals and invert them.
   AddDiagonalAndInvert(D_, block_diagonal_EtE_inverse_.get());
   if (options_.preconditioner_type == JACOBI) {
-    AddDiagonalAndInvert((D_ ==  NULL) ? NULL : D_ + A_->num_cols_e(),
+    AddDiagonalAndInvert((D_ == NULL) ? NULL : D_ + A_->num_cols_e(),
                          block_diagonal_FtF_inverse_.get());
   }
 
@@ -125,8 +121,8 @@
   if (D_ != NULL) {
     ConstVectorRef Dref(D_ + A_->num_cols_e(), num_cols());
     VectorRef(y, num_cols()) =
-        (Dref.array().square() *
-         ConstVectorRef(x, num_cols()).array()).matrix();
+        (Dref.array().square() * ConstVectorRef(x, num_cols()).array())
+            .matrix();
   } else {
     VectorRef(y, num_cols()).setZero();
   }
@@ -139,8 +135,7 @@
 // entries D, add them to the diagonal of the matrix and compute the
 // inverse of each diagonal block.
 void ImplicitSchurComplement::AddDiagonalAndInvert(
-    const double* D,
-    BlockSparseMatrix* block_diagonal) {
+    const double* D, BlockSparseMatrix* block_diagonal) {
   const CompressedRowBlockStructure* block_diagonal_structure =
       block_diagonal->block_structure();
   for (int r = 0; r < block_diagonal_structure->rows.size(); ++r) {
@@ -148,17 +143,16 @@
     const int row_block_size = block_diagonal_structure->rows[r].block.size;
     const Cell& cell = block_diagonal_structure->rows[r].cells[0];
     MatrixRef m(block_diagonal->mutable_values() + cell.position,
-                row_block_size, row_block_size);
+                row_block_size,
+                row_block_size);
 
     if (D != NULL) {
       ConstVectorRef d(D + row_block_pos, row_block_size);
       m += d.array().square().matrix().asDiagonal();
     }
 
-    m = m
-        .selfadjointView<Eigen::Upper>()
-        .llt()
-        .solve(Matrix::Identity(row_block_size, row_block_size));
+    m = m.selfadjointView<Eigen::Upper>().llt().solve(
+        Matrix::Identity(row_block_size, row_block_size));
   }
 }
 
@@ -167,7 +161,7 @@
 void ImplicitSchurComplement::BackSubstitute(const double* x, double* y) {
   const int num_cols_e = A_->num_cols_e();
   const int num_cols_f = A_->num_cols_f();
-  const int num_cols =  A_->num_cols();
+  const int num_cols = A_->num_cols();
   const int num_rows = A_->num_rows();
 
   // y1 = F x
@@ -190,7 +184,7 @@
   // computed via back substitution. The second block of variables
   // corresponds to the Schur complement system, so we just copy those
   // values from the solution to the Schur complement.
-  VectorRef(y + num_cols_e, num_cols_f) =  ConstVectorRef(x, num_cols_f);
+  VectorRef(y + num_cols_e, num_cols_f) = ConstVectorRef(x, num_cols_f);
 }
 
 // Compute the RHS of the Schur complement system.
diff --git a/internal/ceres/implicit_schur_complement.h b/internal/ceres/implicit_schur_complement.h
index f4ddf72..cc22f24 100644
--- a/internal/ceres/implicit_schur_complement.h
+++ b/internal/ceres/implicit_schur_complement.h
@@ -35,10 +35,11 @@
 #define CERES_INTERNAL_IMPLICIT_SCHUR_COMPLEMENT_H_
 
 #include <memory>
+
+#include "ceres/internal/eigen.h"
 #include "ceres/linear_operator.h"
 #include "ceres/linear_solver.h"
 #include "ceres/partitioned_matrix_view.h"
-#include "ceres/internal/eigen.h"
 #include "ceres/types.h"
 
 namespace ceres {
@@ -129,7 +130,7 @@
 
   int num_rows() const final { return A_->num_cols_f(); }
   int num_cols() const final { return A_->num_cols_f(); }
-  const Vector& rhs()    const { return rhs_;             }
+  const Vector& rhs() const { return rhs_; }
 
   const BlockSparseMatrix* block_diagonal_EtE_inverse() const {
     return block_diagonal_EtE_inverse_.get();
diff --git a/internal/ceres/implicit_schur_complement_test.cc b/internal/ceres/implicit_schur_complement_test.cc
index 4e3a598..b6d886f 100644
--- a/internal/ceres/implicit_schur_complement_test.cc
+++ b/internal/ceres/implicit_schur_complement_test.cc
@@ -32,6 +32,7 @@
 
 #include <cstddef>
 #include <memory>
+
 #include "Eigen/Dense"
 #include "ceres/block_random_access_dense_matrix.h"
 #include "ceres/block_sparse_matrix.h"
@@ -54,7 +55,7 @@
 const double kEpsilon = 1e-14;
 
 class ImplicitSchurComplementTest : public ::testing::Test {
- protected :
+ protected:
   void SetUp() final {
     std::unique_ptr<LinearLeastSquaresProblem> problem(
         CreateLinearLeastSquaresProblemFromId(2));
@@ -115,8 +116,11 @@
     VectorRef schur_solution(solution->data() + num_cols_ - num_schur_rows,
                              num_schur_rows);
     schur_solution = lhs->selfadjointView<Eigen::Upper>().llt().solve(*rhs);
-    eliminator->BackSubstitute(BlockSparseMatrixData(*A_), b_.get(), D,
-                               schur_solution.data(), solution->data());
+    eliminator->BackSubstitute(BlockSparseMatrixData(*A_),
+                               b_.get(),
+                               D,
+                               schur_solution.data(),
+                               solution->data());
   }
 
   AssertionResult TestImplicitSchurComplement(double* D) {
@@ -150,18 +154,18 @@
       // the explicit schur complement.
       if ((y - z).norm() > kEpsilon) {
         return testing::AssertionFailure()
-            << "Explicit and Implicit SchurComplements differ in "
-            << "column " << i << ". explicit: " << y.transpose()
-            << " implicit: " << z.transpose();
+               << "Explicit and Implicit SchurComplements differ in "
+               << "column " << i << ". explicit: " << y.transpose()
+               << " implicit: " << z.transpose();
       }
     }
 
     // Compare the rhs of the reduced linear system
     if ((isc.rhs() - rhs).norm() > kEpsilon) {
       return testing::AssertionFailure()
-            << "Explicit and Implicit SchurComplements differ in "
-            << "rhs. explicit: " << rhs.transpose()
-            << " implicit: " << isc.rhs().transpose();
+             << "Explicit and Implicit SchurComplements differ in "
+             << "rhs. explicit: " << rhs.transpose()
+             << " implicit: " << isc.rhs().transpose();
     }
 
     // Reference solution to the f_block.
@@ -174,9 +178,9 @@
     isc.BackSubstitute(reference_f_sol.data(), sol.data());
     if ((sol - reference_solution).norm() > kEpsilon) {
       return testing::AssertionFailure()
-          << "Explicit and Implicit SchurComplements solutions differ. "
-          << "explicit: " << reference_solution.transpose()
-          << " implicit: " << sol.transpose();
+             << "Explicit and Implicit SchurComplements solutions differ. "
+             << "explicit: " << reference_solution.transpose()
+             << " implicit: " << sol.transpose();
     }
 
     return testing::AssertionSuccess();
diff --git a/internal/ceres/inner_product_computer.cc b/internal/ceres/inner_product_computer.cc
index 2bf8836..ef38b7b 100644
--- a/internal/ceres/inner_product_computer.cc
+++ b/internal/ceres/inner_product_computer.cc
@@ -31,12 +31,12 @@
 #include "ceres/inner_product_computer.h"
 
 #include <algorithm>
+
 #include "ceres/small_blas.h"
 
 namespace ceres {
 namespace internal {
 
-
 // Create the CompressedRowSparseMatrix matrix that will contain the
 // inner product.
 //
@@ -297,7 +297,7 @@
       const Cell& cell1 = m_row.cells[c1];
       const int c1_size = bs->cols[cell1.block_id].size;
       const int row_nnz = rows[bs->cols[cell1.block_id].position + 1] -
-          rows[bs->cols[cell1.block_id].position];
+                          rows[bs->cols[cell1.block_id].position];
 
       int c2_begin, c2_end;
       if (storage_type == CompressedRowSparseMatrix::LOWER_TRIANGULAR) {
@@ -311,6 +311,7 @@
       for (int c2 = c2_begin; c2 < c2_end; ++c2, ++cursor) {
         const Cell& cell2 = m_row.cells[c2];
         const int c2_size = bs->cols[cell2.block_id].size;
+        // clang-format off
         MatrixTransposeMatrixMultiply<Eigen::Dynamic, Eigen::Dynamic,
                                       Eigen::Dynamic, Eigen::Dynamic, 1>(
                                           m_values + cell1.position,
@@ -319,6 +320,7 @@
                                           m_row.block.size, c2_size,
                                           values + result_offsets_[cursor],
                                           0, 0, c1_size, row_nnz);
+        // clang-format on
       }
     }
   }
diff --git a/internal/ceres/inner_product_computer_test.cc b/internal/ceres/inner_product_computer_test.cc
index 31cd829..ac564f4 100644
--- a/internal/ceres/inner_product_computer_test.cc
+++ b/internal/ceres/inner_product_computer_test.cc
@@ -32,6 +32,8 @@
 
 #include <memory>
 #include <numeric>
+
+#include "Eigen/SparseCore"
 #include "ceres/block_sparse_matrix.h"
 #include "ceres/internal/eigen.h"
 #include "ceres/random.h"
@@ -39,8 +41,6 @@
 #include "glog/logging.h"
 #include "gtest/gtest.h"
 
-#include "Eigen/SparseCore"
-
 namespace ceres {
 namespace internal {
 
@@ -134,7 +134,6 @@
         inner_product_computer.reset(InnerProductComputer::Create(
             *random_matrix, CompressedRowSparseMatrix::UPPER_TRIANGULAR));
         COMPUTE_AND_COMPARE;
-
       }
     }
   }
@@ -215,7 +214,6 @@
             end_row_block,
             CompressedRowSparseMatrix::UPPER_TRIANGULAR));
         COMPUTE_AND_COMPARE;
-
       }
     }
   }
diff --git a/internal/ceres/invert_psd_matrix.h b/internal/ceres/invert_psd_matrix.h
index 21d301a..ac8808b 100644
--- a/internal/ceres/invert_psd_matrix.h
+++ b/internal/ceres/invert_psd_matrix.h
@@ -31,9 +31,9 @@
 #ifndef CERES_INTERNAL_INVERT_PSD_MATRIX_H_
 #define CERES_INTERNAL_INVERT_PSD_MATRIX_H_
 
+#include "Eigen/Dense"
 #include "ceres/internal/eigen.h"
 #include "glog/logging.h"
-#include "Eigen/Dense"
 
 namespace ceres {
 namespace internal {
@@ -76,4 +76,4 @@
 }  // namespace internal
 }  // namespace ceres
 
-#endif // CERES_INTERNAL_INVERT_PSD_MATRIX_H_
+#endif  // CERES_INTERNAL_INVERT_PSD_MATRIX_H_
diff --git a/internal/ceres/is_close.cc b/internal/ceres/is_close.cc
index a91a174..0becf55 100644
--- a/internal/ceres/is_close.cc
+++ b/internal/ceres/is_close.cc
@@ -35,9 +35,11 @@
 
 namespace ceres {
 namespace internal {
-bool IsClose(double x, double y, double relative_precision,
-             double *relative_error,
-             double *absolute_error) {
+bool IsClose(double x,
+             double y,
+             double relative_precision,
+             double* relative_error,
+             double* absolute_error) {
   double local_absolute_error;
   double local_relative_error;
   if (!absolute_error) {
diff --git a/internal/ceres/is_close.h b/internal/ceres/is_close.h
index 7789448..d0b5e6b 100644
--- a/internal/ceres/is_close.h
+++ b/internal/ceres/is_close.h
@@ -43,8 +43,8 @@
 bool IsClose(double x,
              double y,
              double relative_precision,
-             double *relative_error,
-             double *absolute_error);
+             double* relative_error,
+             double* absolute_error);
 }  // namespace internal
 }  // namespace ceres
 
diff --git a/internal/ceres/is_close_test.cc b/internal/ceres/is_close_test.cc
index 8f7aaba..12d6236 100644
--- a/internal/ceres/is_close_test.cc
+++ b/internal/ceres/is_close_test.cc
@@ -31,6 +31,7 @@
 // This file contains tests for the IsClose function.
 
 #include "ceres/is_close.h"
+
 #include "gtest/gtest.h"
 
 namespace ceres {
diff --git a/internal/ceres/iterative_refiner.cc b/internal/ceres/iterative_refiner.cc
index fb0e45b..5f0bfdd 100644
--- a/internal/ceres/iterative_refiner.cc
+++ b/internal/ceres/iterative_refiner.cc
@@ -28,9 +28,10 @@
 //
 // Author: sameeragarwal@google.com (Sameer Agarwal)
 
-#include <string>
 #include "ceres/iterative_refiner.h"
 
+#include <string>
+
 #include "Eigen/Core"
 #include "ceres/sparse_cholesky.h"
 #include "ceres/sparse_matrix.h"
diff --git a/internal/ceres/iterative_refiner.h b/internal/ceres/iterative_refiner.h
index f969935..b2c39b5 100644
--- a/internal/ceres/iterative_refiner.h
+++ b/internal/ceres/iterative_refiner.h
@@ -32,7 +32,10 @@
 #define CERES_INTERNAL_ITERATIVE_REFINER_H_
 
 // This include must come before any #ifndef check on Ceres compile options.
+// clang-format off
 #include "ceres/internal/port.h"
+// clang-format on
+
 #include "ceres/internal/eigen.h"
 
 namespace ceres {
diff --git a/internal/ceres/iterative_refiner_test.cc b/internal/ceres/iterative_refiner_test.cc
index c474ede..49887c6 100644
--- a/internal/ceres/iterative_refiner_test.cc
+++ b/internal/ceres/iterative_refiner_test.cc
@@ -93,8 +93,8 @@
   virtual ~FakeSparseCholesky() {}
 
   LinearSolverTerminationType Solve(const double* rhs_ptr,
-                                            double* solution_ptr,
-                                            std::string* message) final {
+                                    double* solution_ptr,
+                                    std::string* message) final {
     const int num_cols = lhs_.cols();
     VectorRef solution(solution_ptr, num_cols);
     ConstVectorRef rhs(rhs_ptr, num_cols);
@@ -106,14 +106,14 @@
   CompressedRowSparseMatrix::StorageType StorageType() const final
       DO_NOT_CALL_WITH_RETURN(CompressedRowSparseMatrix::UPPER_TRIANGULAR);
   LinearSolverTerminationType Factorize(CompressedRowSparseMatrix* lhs,
-                                                std::string* message) final
+                                        std::string* message) final
       DO_NOT_CALL_WITH_RETURN(LINEAR_SOLVER_FAILURE);
 
-  LinearSolverTerminationType FactorAndSolve(
-      CompressedRowSparseMatrix* lhs,
-      const double* rhs,
-      double* solution,
-      std::string* message) final DO_NOT_CALL_WITH_RETURN(LINEAR_SOLVER_FAILURE);
+  LinearSolverTerminationType FactorAndSolve(CompressedRowSparseMatrix* lhs,
+                                             const double* rhs,
+                                             double* solution,
+                                             std::string* message) final
+      DO_NOT_CALL_WITH_RETURN(LINEAR_SOLVER_FAILURE);
 
  private:
   Eigen::Matrix<Scalar, Eigen::Dynamic, Eigen::Dynamic> lhs_;
diff --git a/internal/ceres/iterative_schur_complement_solver.cc b/internal/ceres/iterative_schur_complement_solver.cc
index 6076c38..143df5e 100644
--- a/internal/ceres/iterative_schur_complement_solver.cc
+++ b/internal/ceres/iterative_schur_complement_solver.cc
@@ -55,8 +55,7 @@
 
 IterativeSchurComplementSolver::IterativeSchurComplementSolver(
     const LinearSolver::Options& options)
-    : options_(options) {
-}
+    : options_(options) {}
 
 IterativeSchurComplementSolver::~IterativeSchurComplementSolver() {}
 
diff --git a/internal/ceres/iterative_schur_complement_solver.h b/internal/ceres/iterative_schur_complement_solver.h
index 9aed94f..ca002d2 100644
--- a/internal/ceres/iterative_schur_complement_solver.h
+++ b/internal/ceres/iterative_schur_complement_solver.h
@@ -32,8 +32,9 @@
 #define CERES_INTERNAL_ITERATIVE_SCHUR_COMPLEMENT_SOLVER_H_
 
 #include <memory>
-#include "ceres/linear_solver.h"
+
 #include "ceres/internal/eigen.h"
+#include "ceres/linear_solver.h"
 #include "ceres/types.h"
 
 namespace ceres {
@@ -70,17 +71,17 @@
 class IterativeSchurComplementSolver : public BlockSparseMatrixSolver {
  public:
   explicit IterativeSchurComplementSolver(const LinearSolver::Options& options);
-  IterativeSchurComplementSolver(const IterativeSchurComplementSolver&) = delete;
+  IterativeSchurComplementSolver(const IterativeSchurComplementSolver&) =
+      delete;
   void operator=(const IterativeSchurComplementSolver&) = delete;
 
   virtual ~IterativeSchurComplementSolver();
 
  private:
-  LinearSolver::Summary SolveImpl(
-      BlockSparseMatrix* A,
-      const double* b,
-      const LinearSolver::PerSolveOptions& options,
-      double* x) final;
+  LinearSolver::Summary SolveImpl(BlockSparseMatrix* A,
+                                  const double* b,
+                                  const LinearSolver::PerSolveOptions& options,
+                                  double* x) final;
 
   void CreatePreconditioner(BlockSparseMatrix* A);
 
diff --git a/internal/ceres/iterative_schur_complement_solver_test.cc b/internal/ceres/iterative_schur_complement_solver_test.cc
index 3bf2d92..fdd65c7 100644
--- a/internal/ceres/iterative_schur_complement_solver_test.cc
+++ b/internal/ceres/iterative_schur_complement_solver_test.cc
@@ -36,6 +36,7 @@
 
 #include <cstddef>
 #include <memory>
+
 #include "Eigen/Dense"
 #include "ceres/block_random_access_dense_matrix.h"
 #include "ceres/block_sparse_matrix.h"
@@ -58,7 +59,7 @@
 const double kEpsilon = 1e-14;
 
 class IterativeSchurComplementSolverTest : public ::testing::Test {
- protected :
+ protected:
   void SetUpProblem(int problem_id) {
     std::unique_ptr<LinearLeastSquaresProblem> problem(
         CreateLinearLeastSquaresProblemFromId(problem_id));
@@ -74,9 +75,8 @@
   }
 
   AssertionResult TestSolver(double* D) {
-    TripletSparseMatrix triplet_A(A_->num_rows(),
-                                  A_->num_cols(),
-                                  A_->num_nonzeros());
+    TripletSparseMatrix triplet_A(
+        A_->num_rows(), A_->num_cols(), A_->num_nonzeros());
     A_->ToTripletSparseMatrix(&triplet_A);
 
     DenseSparseMatrix dense_A(triplet_A);
@@ -99,15 +99,15 @@
     IterativeSchurComplementSolver isc(options);
 
     Vector isc_sol(num_cols_);
-    per_solve_options.r_tolerance  = 1e-12;
+    per_solve_options.r_tolerance = 1e-12;
     isc.Solve(A_.get(), b_.get(), per_solve_options, isc_sol.data());
     double diff = (isc_sol - reference_solution).norm();
     if (diff < kEpsilon) {
       return testing::AssertionSuccess();
     } else {
       return testing::AssertionFailure()
-          << "The reference solution differs from the ITERATIVE_SCHUR"
-          << " solution by " << diff << " which is more than " << kEpsilon;
+             << "The reference solution differs from the ITERATIVE_SCHUR"
+             << " solution by " << diff << " which is more than " << kEpsilon;
     }
   }
 
diff --git a/internal/ceres/jet_test.cc b/internal/ceres/jet_test.cc
index f1e9586..36f279d 100644
--- a/internal/ceres/jet_test.cc
+++ b/internal/ceres/jet_test.cc
@@ -62,30 +62,32 @@
 // On a 32-bit optimized build, the mismatch is about 1.4e-14.
 double const kTolerance = 1e-13;
 
-void ExpectJetsClose(const J &x, const J &y) {
+void ExpectJetsClose(const J& x, const J& y) {
   ExpectClose(x.a, y.a, kTolerance);
   ExpectClose(x.v[0], y.v[0], kTolerance);
   ExpectClose(x.v[1], y.v[1], kTolerance);
 }
 
 const double kStep = 1e-8;
-const double kNumericalTolerance = 1e-6; // Numeric derivation is quite inexact
+const double kNumericalTolerance = 1e-6;  // Numeric derivation is quite inexact
 
 // Differentiate using Jet and confirm results with numerical derivation.
-template<typename Function>
+template <typename Function>
 void NumericalTest(const char* name, const Function& f, const double x) {
   const double exact_dx = f(MakeJet(x, 1.0, 0.0)).v[0];
   const double estimated_dx =
-    (f(J(x + kStep)).a - f(J(x - kStep)).a) / (2.0 * kStep);
-  VL << name << "(" << x << "), exact dx: "
-     << exact_dx << ", estimated dx: " << estimated_dx;
+      (f(J(x + kStep)).a - f(J(x - kStep)).a) / (2.0 * kStep);
+  VL << name << "(" << x << "), exact dx: " << exact_dx
+     << ", estimated dx: " << estimated_dx;
   ExpectClose(exact_dx, estimated_dx, kNumericalTolerance);
 }
 
 // Same as NumericalTest, but given a function taking two arguments.
-template<typename Function>
-void NumericalTest2(const char* name, const Function& f,
-                    const double x, const double y) {
+template <typename Function>
+void NumericalTest2(const char* name,
+                    const Function& f,
+                    const double x,
+                    const double y) {
   const J exact_delta = f(MakeJet(x, 1.0, 0.0), MakeJet(y, 0.0, 1.0));
   const double exact_dx = exact_delta.v[0];
   const double exact_dy = exact_delta.v[1];
@@ -97,14 +99,14 @@
   EXPECT_EQ(exact_dy, f(MakeJet(x, 0.0, 0.0), MakeJet(y, 0.0, 1.0)).v[1]);
 
   const double estimated_dx =
-    (f(J(x + kStep), J(y)).a - f(J(x - kStep), J(y)).a) / (2.0 * kStep);
+      (f(J(x + kStep), J(y)).a - f(J(x - kStep), J(y)).a) / (2.0 * kStep);
   const double estimated_dy =
-    (f(J(x), J(y + kStep)).a - f(J(x), J(y - kStep)).a) / (2.0 * kStep);
-  VL << name << "(" << x << ", " << y << "), exact dx: "
-     << exact_dx << ", estimated dx: " << estimated_dx;
+      (f(J(x), J(y + kStep)).a - f(J(x), J(y - kStep)).a) / (2.0 * kStep);
+  VL << name << "(" << x << ", " << y << "), exact dx: " << exact_dx
+     << ", estimated dx: " << estimated_dx;
   ExpectClose(exact_dx, estimated_dx, kNumericalTolerance);
-  VL << name << "(" << x << ", " << y << "), exact dy: "
-     << exact_dy << ", estimated dy: " << estimated_dy;
+  VL << name << "(" << x << ", " << y << "), exact dy: " << exact_dy
+     << ", estimated dy: " << estimated_dy;
   ExpectClose(exact_dy, estimated_dy, kNumericalTolerance);
 }
 
@@ -113,12 +115,12 @@
 TEST(Jet, Jet) {
   // Pick arbitrary values for x and y.
   J x = MakeJet(2.3, -2.7, 1e-3);
-  J y = MakeJet(1.7,  0.5, 1e+2);
+  J y = MakeJet(1.7, 0.5, 1e+2);
 
   VL << "x = " << x;
   VL << "y = " << y;
 
-  { // Check that log(exp(x)) == x.
+  {  // Check that log(exp(x)) == x.
     J z = exp(x);
     J w = log(z);
     VL << "z = " << z;
@@ -126,7 +128,7 @@
     ExpectJetsClose(w, x);
   }
 
-  { // Check that (x * y) / x == y.
+  {  // Check that (x * y) / x == y.
     J z = x * y;
     J w = z / x;
     VL << "z = " << z;
@@ -134,7 +136,7 @@
     ExpectJetsClose(w, y);
   }
 
-  { // Check that sqrt(x * x) == x.
+  {  // Check that sqrt(x * x) == x.
     J z = x * x;
     J w = sqrt(z);
     VL << "z = " << z;
@@ -142,7 +144,7 @@
     ExpectJetsClose(w, x);
   }
 
-  { // Check that sqrt(y) * sqrt(y) == y.
+  {  // Check that sqrt(y) * sqrt(y) == y.
     J z = sqrt(y);
     J w = z * z;
     VL << "z = " << z;
@@ -153,23 +155,23 @@
   NumericalTest("sqrt", sqrt<double, 2>, 0.00001);
   NumericalTest("sqrt", sqrt<double, 2>, 1.0);
 
-  { // Check that cos(2*x) = cos(x)^2 - sin(x)^2
+  {  // Check that cos(2*x) = cos(x)^2 - sin(x)^2
     J z = cos(J(2.0) * x);
-    J w = cos(x)*cos(x) - sin(x)*sin(x);
+    J w = cos(x) * cos(x) - sin(x) * sin(x);
     VL << "z = " << z;
     VL << "w = " << w;
     ExpectJetsClose(w, z);
   }
 
-  { // Check that sin(2*x) = 2*cos(x)*sin(x)
+  {  // Check that sin(2*x) = 2*cos(x)*sin(x)
     J z = sin(J(2.0) * x);
-    J w = J(2.0)*cos(x)*sin(x);
+    J w = J(2.0) * cos(x) * sin(x);
     VL << "z = " << z;
     VL << "w = " << w;
     ExpectJetsClose(w, z);
   }
 
-  { // Check that cos(x)*cos(x) + sin(x)*sin(x) = 1
+  {  // Check that cos(x)*cos(x) + sin(x)*sin(x) = 1
     J z = cos(x) * cos(x);
     J w = sin(x) * sin(x);
     VL << "z = " << z;
@@ -177,7 +179,7 @@
     ExpectJetsClose(z + w, J(1.0));
   }
 
-  { // Check that atan2(r*sin(t), r*cos(t)) = t.
+  {  // Check that atan2(r*sin(t), r*cos(t)) = t.
     J t = MakeJet(0.7, -0.3, +1.5);
     J r = MakeJet(2.3, 0.13, -2.4);
     VL << "t = " << t;
@@ -189,7 +191,7 @@
     ExpectJetsClose(u, t);
   }
 
-  { // Check that tan(x) = sin(x) / cos(x).
+  {  // Check that tan(x) = sin(x) / cos(x).
     J z = tan(x);
     J w = sin(x) / cos(x);
     VL << "z = " << z;
@@ -197,7 +199,7 @@
     ExpectJetsClose(z, w);
   }
 
-  { // Check that tan(atan(x)) = x.
+  {  // Check that tan(atan(x)) = x.
     J z = tan(atan(x));
     J w = x;
     VL << "z = " << z;
@@ -205,7 +207,7 @@
     ExpectJetsClose(z, w);
   }
 
-  { // Check that cosh(x)*cosh(x) - sinh(x)*sinh(x) = 1
+  {  // Check that cosh(x)*cosh(x) - sinh(x)*sinh(x) = 1
     J z = cosh(x) * cosh(x);
     J w = sinh(x) * sinh(x);
     VL << "z = " << z;
@@ -213,7 +215,7 @@
     ExpectJetsClose(z - w, J(1.0));
   }
 
-  { // Check that tanh(x + y) = (tanh(x) + tanh(y)) / (1 + tanh(x) tanh(y))
+  {  // Check that tanh(x + y) = (tanh(x) + tanh(y)) / (1 + tanh(x) tanh(y))
     J z = tanh(x + y);
     J w = (tanh(x) + tanh(y)) / (J(1.0) + tanh(x) * tanh(y));
     VL << "z = " << z;
@@ -221,7 +223,7 @@
     ExpectJetsClose(z, w);
   }
 
-  { // Check that pow(x, 1) == x.
+  {  // Check that pow(x, 1) == x.
     VL << "x = " << x;
 
     J u = pow(x, 1.);
@@ -230,7 +232,7 @@
     ExpectJetsClose(x, u);
   }
 
-  { // Check that pow(x, 1) == x.
+  {  // Check that pow(x, 1) == x.
     J y = MakeJet(1, 0.0, 0.0);
     VL << "x = " << x;
     VL << "y = " << y;
@@ -241,7 +243,7 @@
     ExpectJetsClose(x, u);
   }
 
-  { // Check that pow(e, log(x)) == x.
+  {  // Check that pow(e, log(x)) == x.
     J logx = log(x);
 
     VL << "x = " << x;
@@ -253,7 +255,7 @@
     ExpectJetsClose(x, u);
   }
 
-  { // Check that pow(e, log(x)) == x.
+  {  // Check that pow(e, log(x)) == x.
     J logx = log(x);
     J e = MakeJet(kE, 0., 0.);
     VL << "x = " << x;
@@ -265,7 +267,7 @@
     ExpectJetsClose(x, u);
   }
 
-  { // Check that pow(e, log(x)) == x.
+  {  // Check that pow(e, log(x)) == x.
     J logx = log(x);
     J e = MakeJet(kE, 0., 0.);
     VL << "x = " << x;
@@ -277,13 +279,13 @@
     ExpectJetsClose(x, u);
   }
 
-  { // Check that pow(x,y) = exp(y*log(x)).
+  {  // Check that pow(x,y) = exp(y*log(x)).
     J logx = log(x);
     J e = MakeJet(kE, 0., 0.);
     VL << "x = " << x;
     VL << "logx = " << logx;
 
-    J u = pow(e, y*logx);
+    J u = pow(e, y * logx);
     J v = pow(x, y);
     VL << "u = " << u;
     VL << "v = " << v;
@@ -291,7 +293,7 @@
     ExpectJetsClose(v, u);
   }
 
-  { // Check that pow(0, y) == 0 for y > 1, with both arguments Jets.
+  {  // Check that pow(0, y) == 0 for y > 1, with both arguments Jets.
     // This tests special case handling inside pow().
     J a = MakeJet(0, 1, 2);
     J b = MakeJet(2, 3, 4);
@@ -303,7 +305,7 @@
     ExpectJetsClose(c, MakeJet(0, 0, 0));
   }
 
-  { // Check that pow(0, y) == 0 for y == 1, with both arguments Jets.
+  {  // Check that pow(0, y) == 0 for y == 1, with both arguments Jets.
     // This tests special case handling inside pow().
     J a = MakeJet(0, 1, 2);
     J b = MakeJet(1, 3, 4);
@@ -315,10 +317,10 @@
     ExpectJetsClose(c, MakeJet(0, 1, 2));
   }
 
-  { // Check that pow(0, <1) is not finite, with both arguments Jets.
+  {  // Check that pow(0, <1) is not finite, with both arguments Jets.
     for (int i = 1; i < 10; i++) {
       J a = MakeJet(0, 1, 2);
-      J b = MakeJet(i*0.1, 3, 4);       // b = 0.1 ... 0.9
+      J b = MakeJet(i * 0.1, 3, 4);  // b = 0.1 ... 0.9
       VL << "a = " << a;
       VL << "b = " << b;
 
@@ -330,7 +332,7 @@
     }
     for (int i = -10; i < 0; i++) {
       J a = MakeJet(0, 1, 2);
-      J b = MakeJet(i*0.1, 3, 4);       // b = -1,-0.9 ... -0.1
+      J b = MakeJet(i * 0.1, 3, 4);  // b = -1,-0.9 ... -0.1
       VL << "a = " << a;
       VL << "b = " << b;
 
@@ -356,7 +358,7 @@
     }
   }
 
-  { // Check that pow(<0, b) is correct for integer b.
+  {  // Check that pow(<0, b) is correct for integer b.
     // This tests special case handling inside pow().
     J a = MakeJet(-1.5, 3, 4);
 
@@ -375,7 +377,7 @@
     }
   }
 
-  { // Check that pow(<0, b) is correct for noninteger b.
+  {  // Check that pow(<0, b) is correct for noninteger b.
     // This tests special case handling inside pow().
     J a = MakeJet(-1.5, 3, 4);
     J b = MakeJet(-2.5, 0, 5);
@@ -435,7 +437,7 @@
     EXPECT_FALSE(IsFinite(c.v[1]));
   }
 
-  { // Check that 1 + x == x + 1.
+  {  // Check that 1 + x == x + 1.
     J a = x + 1.0;
     J b = 1.0 + x;
     J c = x;
@@ -445,7 +447,7 @@
     ExpectJetsClose(a, c);
   }
 
-  { // Check that 1 - x == -(x - 1).
+  {  // Check that 1 - x == -(x - 1).
     J a = 1.0 - x;
     J b = -(x - 1.0);
     J c = x;
@@ -455,7 +457,7 @@
     ExpectJetsClose(a, -c);
   }
 
-  { // Check that (x/s)*s == (x*s)/s.
+  {  // Check that (x/s)*s == (x*s)/s.
     J a = x / 5.0;
     J b = x * 5.0;
     J c = x;
@@ -468,7 +470,7 @@
     ExpectJetsClose(b, d);
   }
 
-  { // Check that x / y == 1 / (y / x).
+  {  // Check that x / y == 1 / (y / x).
     J a = x / y;
     J b = 1.0 / (y / x);
     VL << "a = " << a;
@@ -477,26 +479,26 @@
     ExpectJetsClose(a, b);
   }
 
-  { // Check that abs(-x * x) == sqrt(x * x).
+  {  // Check that abs(-x * x) == sqrt(x * x).
     ExpectJetsClose(abs(-x), sqrt(x * x));
   }
 
-  { // Check that cos(acos(x)) == x.
+  {  // Check that cos(acos(x)) == x.
     J a = MakeJet(0.1, -2.7, 1e-3);
     ExpectJetsClose(cos(acos(a)), a);
     ExpectJetsClose(acos(cos(a)), a);
 
-    J b = MakeJet(0.6,  0.5, 1e+2);
+    J b = MakeJet(0.6, 0.5, 1e+2);
     ExpectJetsClose(cos(acos(b)), b);
     ExpectJetsClose(acos(cos(b)), b);
   }
 
-  { // Check that sin(asin(x)) == x.
+  {  // Check that sin(asin(x)) == x.
     J a = MakeJet(0.1, -2.7, 1e-3);
     ExpectJetsClose(sin(asin(a)), a);
     ExpectJetsClose(asin(sin(a)), a);
 
-    J b = MakeJet(0.4,  0.5, 1e+2);
+    J b = MakeJet(0.4, 0.5, 1e+2);
     ExpectJetsClose(sin(asin(b)), b);
     ExpectJetsClose(asin(sin(b)), b);
   }
@@ -529,49 +531,49 @@
     ExpectJetsClose(BesselJ0(z) + BesselJn(2, z), (2.0 / z) * BesselJ1(z));
   }
 
-  { // Check that floor of a positive number works.
+  {  // Check that floor of a positive number works.
     J a = MakeJet(0.1, -2.7, 1e-3);
     J b = floor(a);
     J expected = MakeJet(floor(a.a), 0.0, 0.0);
     EXPECT_EQ(expected, b);
   }
 
-  { // Check that floor of a negative number works.
+  {  // Check that floor of a negative number works.
     J a = MakeJet(-1.1, -2.7, 1e-3);
     J b = floor(a);
     J expected = MakeJet(floor(a.a), 0.0, 0.0);
     EXPECT_EQ(expected, b);
   }
 
-  { // Check that floor of a positive number works.
+  {  // Check that floor of a positive number works.
     J a = MakeJet(10.123, -2.7, 1e-3);
     J b = floor(a);
     J expected = MakeJet(floor(a.a), 0.0, 0.0);
     EXPECT_EQ(expected, b);
   }
 
-  { // Check that ceil of a positive number works.
+  {  // Check that ceil of a positive number works.
     J a = MakeJet(0.1, -2.7, 1e-3);
     J b = ceil(a);
     J expected = MakeJet(ceil(a.a), 0.0, 0.0);
     EXPECT_EQ(expected, b);
   }
 
-  { // Check that ceil of a negative number works.
+  {  // Check that ceil of a negative number works.
     J a = MakeJet(-1.1, -2.7, 1e-3);
     J b = ceil(a);
     J expected = MakeJet(ceil(a.a), 0.0, 0.0);
     EXPECT_EQ(expected, b);
   }
 
-  { // Check that ceil of a positive number works.
+  {  // Check that ceil of a positive number works.
     J a = MakeJet(10.123, -2.7, 1e-3);
     J b = ceil(a);
     J expected = MakeJet(ceil(a.a), 0.0, 0.0);
     EXPECT_EQ(expected, b);
   }
 
-  { // Check that erf works.
+  {  // Check that erf works.
     J a = MakeJet(10.123, -2.7, 1e-3);
     J b = erf(a);
     J expected = MakeJet(erf(a.a), 0.0, 0.0);
@@ -582,7 +584,7 @@
   NumericalTest("erf", erf<double, 2>, 0.5);
   NumericalTest("erf", erf<double, 2>, 100.0);
 
-  { // Check that erfc works.
+  {  // Check that erfc works.
     J a = MakeJet(10.123, -2.7, 1e-3);
     J b = erfc(a);
     J expected = MakeJet(erfc(a.a), 0.0, 0.0);
@@ -593,7 +595,7 @@
   NumericalTest("erfc", erfc<double, 2>, 0.5);
   NumericalTest("erfc", erfc<double, 2>, 100.0);
 
-  { // Check that cbrt(x * x * x) == x.
+  {  // Check that cbrt(x * x * x) == x.
     J z = x * x * x;
     J w = cbrt(z);
     VL << "z = " << z;
@@ -601,7 +603,7 @@
     ExpectJetsClose(w, x);
   }
 
-  { // Check that cbrt(y) * cbrt(y) * cbrt(y) == y.
+  {  // Check that cbrt(y) * cbrt(y) * cbrt(y) == y.
     J z = cbrt(y);
     J w = z * z * z;
     VL << "z = " << z;
@@ -609,7 +611,7 @@
     ExpectJetsClose(w, y);
   }
 
-  { // Check that cbrt(x) == pow(x, 1/3).
+  {  // Check that cbrt(x) == pow(x, 1/3).
     J z = cbrt(x);
     J w = pow(x, 1.0 / 3.0);
     VL << "z = " << z;
@@ -621,7 +623,7 @@
   NumericalTest("cbrt", cbrt<double, 2>, 1e-5);
   NumericalTest("cbrt", cbrt<double, 2>, 1.0);
 
-  { // Check that exp2(x) == exp(x * log(2))
+  {  // Check that exp2(x) == exp(x * log(2))
     J z = exp2(x);
     J w = exp(x * log(2.0));
     VL << "z = " << z;
@@ -636,7 +638,7 @@
   NumericalTest("exp2", exp2<double, 2>, 1e-5);
   NumericalTest("exp2", exp2<double, 2>, 1.0);
 
-  { // Check that log2(x) == log(x) / log(2)
+  {  // Check that log2(x) == log(x) / log(2)
     J z = log2(x);
     J w = log(x) / log(2.0);
     VL << "z = " << z;
@@ -647,15 +649,15 @@
   NumericalTest("log2", log2<double, 2>, 1.0);
   NumericalTest("log2", log2<double, 2>, 100.0);
 
-  { // Check that hypot(x, y) == sqrt(x^2 + y^2)
+  {  // Check that hypot(x, y) == sqrt(x^2 + y^2)
     J h = hypot(x, y);
-    J s = sqrt(x*x + y*y);
+    J s = sqrt(x * x + y * y);
     VL << "h = " << h;
     VL << "s = " << s;
     ExpectJetsClose(h, s);
   }
 
-  { // Check that hypot(x, x) == sqrt(2) * abs(x)
+  {  // Check that hypot(x, x) == sqrt(2) * abs(x)
     J h = hypot(x, x);
     J s = sqrt(2.0) * abs(x);
     VL << "h = " << h;
@@ -663,35 +665,35 @@
     ExpectJetsClose(h, s);
   }
 
-  { // Check that the derivative is zero tangentially to the circle:
+  {  // Check that the derivative is zero tangentially to the circle:
     J h = hypot(MakeJet(2.0, 1.0, 1.0), MakeJet(2.0, 1.0, -1.0));
     VL << "h = " << h;
     ExpectJetsClose(h, MakeJet(sqrt(8.0), std::sqrt(2.0), 0.0));
   }
 
-  { // Check that hypot(x, 0) == x
+  {  // Check that hypot(x, 0) == x
     J zero = MakeJet(0.0, 2.0, 3.14);
     J h = hypot(x, zero);
     VL << "h = " << h;
     ExpectJetsClose(x, h);
   }
 
-  { // Check that hypot(0, y) == y
+  {  // Check that hypot(0, y) == y
     J zero = MakeJet(0.0, 2.0, 3.14);
     J h = hypot(zero, y);
     VL << "h = " << h;
     ExpectJetsClose(y, h);
   }
 
-  { // Check that hypot(x, 0) == sqrt(x * x) == x, even when x * x underflows:
-    EXPECT_EQ(DBL_MIN * DBL_MIN, 0.0); // Make sure it underflows
+  {  // Check that hypot(x, 0) == sqrt(x * x) == x, even when x * x underflows:
+    EXPECT_EQ(DBL_MIN * DBL_MIN, 0.0);  // Make sure it underflows
     J huge = MakeJet(DBL_MIN, 2.0, 3.14);
     J h = hypot(huge, J(0.0));
     VL << "h = " << h;
     ExpectJetsClose(h, huge);
   }
 
-  { // Check that hypot(x, 0) == sqrt(x * x) == x, even when x * x overflows:
+  {  // Check that hypot(x, 0) == sqrt(x * x) == x, even when x * x overflows:
     EXPECT_EQ(DBL_MAX * DBL_MAX, std::numeric_limits<double>::infinity());
     J huge = MakeJet(DBL_MAX, 2.0, 3.14);
     J h = hypot(huge, J(0.0));
@@ -699,6 +701,7 @@
     ExpectJetsClose(h, huge);
   }
 
+  // clang-format off
   NumericalTest2("hypot", hypot<double, 2>,  0.0,   1e-5);
   NumericalTest2("hypot", hypot<double, 2>, -1e-5,  0.0);
   NumericalTest2("hypot", hypot<double, 2>,  1e-5,  1e-5);
@@ -708,6 +711,7 @@
   NumericalTest2("hypot", hypot<double, 2>, -1e-3,  1.0);
   NumericalTest2("hypot", hypot<double, 2>, -1e-3, -1.0);
   NumericalTest2("hypot", hypot<double, 2>,  1.0,   2.0);
+  // clang-format on
 
   {
     J z = fmax(x, y);
@@ -720,14 +724,13 @@
     VL << "z = " << z;
     ExpectJetsClose(y, z);
   }
-
 }
 
 TEST(Jet, JetsInEigenMatrices) {
   J x = MakeJet(2.3, -2.7, 1e-3);
-  J y = MakeJet(1.7,  0.5, 1e+2);
+  J y = MakeJet(1.7, 0.5, 1e+2);
   J z = MakeJet(5.3, -4.7, 1e-3);
-  J w = MakeJet(9.7,  1.5, 10.1);
+  J w = MakeJet(9.7, 1.5, 10.1);
 
   Eigen::Matrix<J, 2, 2> M;
   Eigen::Matrix<J, 2, 1> v, r1, r2;
@@ -795,11 +798,11 @@
   Eigen::Matrix<J, 3, 1> b, x;
   for (int i = 0; i < 3; ++i) {
     for (int j = 0; j < 3; ++j) {
-      A(i,j) = MakeJet(0.0, i, j * j);
+      A(i, j) = MakeJet(0.0, i, j * j);
     }
     b(i) = MakeJet(i, i, i);
     x(i) = MakeJet(0.0, 0.0, 0.0);
-    A(i,i) = MakeJet(1.0, i, i * i);
+    A(i, i) = MakeJet(1.0, i, i * i);
   }
   x = A.llt().solve(b);
   for (int i = 0; i < 3; ++i) {
@@ -812,11 +815,11 @@
   Eigen::Matrix<J, 3, 1> b, x;
   for (int i = 0; i < 3; ++i) {
     for (int j = 0; j < 3; ++j) {
-      A(i,j) = MakeJet(0.0, i, j * j);
+      A(i, j) = MakeJet(0.0, i, j * j);
     }
     b(i) = MakeJet(i, i, i);
     x(i) = MakeJet(0.0, 0.0, 0.0);
-    A(i,i) = MakeJet(1.0, i, i * i);
+    A(i, i) = MakeJet(1.0, i, i * i);
   }
   x = A.ldlt().solve(b);
   for (int i = 0; i < 3; ++i) {
@@ -829,11 +832,11 @@
   Eigen::Matrix<J, 3, 1> b, x;
   for (int i = 0; i < 3; ++i) {
     for (int j = 0; j < 3; ++j) {
-      A(i,j) = MakeJet(0.0, i, j * j);
+      A(i, j) = MakeJet(0.0, i, j * j);
     }
     b(i) = MakeJet(i, i, i);
     x(i) = MakeJet(0.0, 0.0, 0.0);
-    A(i,i) = MakeJet(1.0, i, i * i);
+    A(i, i) = MakeJet(1.0, i, i * i);
   }
 
   x = A.lu().solve(b);
@@ -845,7 +848,7 @@
 // ScalarBinaryOpTraits is only supported on Eigen versions >= 3.3
 TEST(JetTraitsTest, MatrixScalarUnaryOps) {
   const J x = MakeJet(2.3, -2.7, 1e-3);
-  const J y = MakeJet(1.7,  0.5, 1e+2);
+  const J y = MakeJet(1.7, 0.5, 1e+2);
   Eigen::Matrix<J, 2, 1> a;
   a << x, y;
 
@@ -856,9 +859,9 @@
 
 TEST(JetTraitsTest, MatrixScalarBinaryOps) {
   const J x = MakeJet(2.3, -2.7, 1e-3);
-  const J y = MakeJet(1.7,  0.5, 1e+2);
+  const J y = MakeJet(1.7, 0.5, 1e+2);
   const J z = MakeJet(5.3, -4.7, 1e-3);
-  const J w = MakeJet(9.7,  1.5, 10.1);
+  const J w = MakeJet(9.7, 1.5, 10.1);
 
   Eigen::Matrix<J, 2, 2> M;
   Eigen::Vector2d v;
@@ -886,7 +889,7 @@
 
 TEST(JetTraitsTest, ArrayScalarUnaryOps) {
   const J x = MakeJet(2.3, -2.7, 1e-3);
-  const J y = MakeJet(1.7,  0.5, 1e+2);
+  const J y = MakeJet(1.7, 0.5, 1e+2);
   Eigen::Array<J, 2, 1> a;
   a << x, y;
 
@@ -897,7 +900,7 @@
 
 TEST(JetTraitsTest, ArrayScalarBinaryOps) {
   const J x = MakeJet(2.3, -2.7, 1e-3);
-  const J y = MakeJet(1.7,  0.5, 1e+2);
+  const J y = MakeJet(1.7, 0.5, 1e+2);
 
   Eigen::Array<J, 2, 1> a;
   Eigen::Array2d b;
@@ -922,8 +925,8 @@
 }
 
 TEST(Jet, nested3x) {
-  typedef Jet<J,2> JJ;
-  typedef Jet<JJ,2> JJJ;
+  typedef Jet<J, 2> JJ;
+  typedef Jet<JJ, 2> JJJ;
 
   JJJ x;
   x.a = JJ(J(1, 0), 0);
diff --git a/internal/ceres/lapack.cc b/internal/ceres/lapack.cc
index 37efbcd..a159ec7 100644
--- a/internal/ceres/lapack.cc
+++ b/internal/ceres/lapack.cc
@@ -36,11 +36,7 @@
 
 #ifndef CERES_NO_LAPACK
 // C interface to the LAPACK Cholesky factorization and triangular solve.
-extern "C" void dpotrf_(char* uplo,
-                       int* n,
-                       double* a,
-                       int* lda,
-                       int* info);
+extern "C" void dpotrf_(char* uplo, int* n, double* a, int* lda, int* info);
 
 extern "C" void dpotrs_(char* uplo,
                         int* n,
@@ -92,10 +88,10 @@
   }
 
   if (info > 0) {
-    *message =
-        StringPrintf(
-            "LAPACK::dpotrf numerical failure. "
-             "The leading minor of order %d is not positive definite.", info);
+    *message = StringPrintf(
+        "LAPACK::dpotrf numerical failure. "
+        "The leading minor of order %d is not positive definite.",
+        info);
     return LINEAR_SOLVER_FAILURE;
   }
 
diff --git a/internal/ceres/lapack.h b/internal/ceres/lapack.h
index 5bb1a22..5c5bf8b 100644
--- a/internal/ceres/lapack.h
+++ b/internal/ceres/lapack.h
@@ -32,6 +32,7 @@
 #define CERES_INTERNAL_LAPACK_H_
 
 #include <string>
+
 #include "ceres/internal/port.h"
 #include "ceres/linear_solver.h"
 
diff --git a/internal/ceres/levenberg_marquardt_strategy.cc b/internal/ceres/levenberg_marquardt_strategy.cc
index 9eec631..cb0e937 100644
--- a/internal/ceres/levenberg_marquardt_strategy.cc
+++ b/internal/ceres/levenberg_marquardt_strategy.cc
@@ -61,8 +61,7 @@
   CHECK_GT(max_radius_, 0.0);
 }
 
-LevenbergMarquardtStrategy::~LevenbergMarquardtStrategy() {
-}
+LevenbergMarquardtStrategy::~LevenbergMarquardtStrategy() {}
 
 TrustRegionStrategy::Summary LevenbergMarquardtStrategy::ComputeStep(
     const TrustRegionStrategy::PerSolveOptions& per_solve_options,
@@ -81,8 +80,8 @@
 
     jacobian->SquaredColumnNorm(diagonal_.data());
     for (int i = 0; i < num_parameters; ++i) {
-      diagonal_[i] = std::min(std::max(diagonal_[i], min_diagonal_),
-                              max_diagonal_);
+      diagonal_[i] =
+          std::min(std::max(diagonal_[i], min_diagonal_), max_diagonal_);
     }
   }
 
@@ -112,7 +111,7 @@
   if (linear_solver_summary.termination_type == LINEAR_SOLVER_FATAL_ERROR) {
     LOG(WARNING) << "Linear solver fatal error: "
                  << linear_solver_summary.message;
-  } else if (linear_solver_summary.termination_type == LINEAR_SOLVER_FAILURE)  {
+  } else if (linear_solver_summary.termination_type == LINEAR_SOLVER_FAILURE) {
     LOG(WARNING) << "Linear solver failure. Failed to compute a step: "
                  << linear_solver_summary.message;
   } else if (!IsArrayValid(num_parameters, step)) {
@@ -138,7 +137,6 @@
     }
   }
 
-
   TrustRegionStrategy::Summary summary;
   summary.residual_norm = linear_solver_summary.residual_norm;
   summary.num_iterations = linear_solver_summary.num_iterations;
@@ -148,8 +146,8 @@
 
 void LevenbergMarquardtStrategy::StepAccepted(double step_quality) {
   CHECK_GT(step_quality, 0.0);
-  radius_ = radius_ / std::max(1.0 / 3.0,
-                               1.0 - pow(2.0 * step_quality - 1.0, 3));
+  radius_ =
+      radius_ / std::max(1.0 / 3.0, 1.0 - pow(2.0 * step_quality - 1.0, 3));
   radius_ = std::min(max_radius_, radius_);
   decrease_factor_ = 2.0;
   reuse_diagonal_ = false;
@@ -161,9 +159,7 @@
   reuse_diagonal_ = true;
 }
 
-double LevenbergMarquardtStrategy::Radius() const {
-  return radius_;
-}
+double LevenbergMarquardtStrategy::Radius() const { return radius_; }
 
 }  // namespace internal
 }  // namespace ceres
diff --git a/internal/ceres/levenberg_marquardt_strategy.h b/internal/ceres/levenberg_marquardt_strategy.h
index 8fb37f3..4cadd6d 100644
--- a/internal/ceres/levenberg_marquardt_strategy.h
+++ b/internal/ceres/levenberg_marquardt_strategy.h
@@ -74,7 +74,7 @@
   const double max_diagonal_;
   double decrease_factor_;
   bool reuse_diagonal_;
-  Vector diagonal_;   // diagonal_ =  diag(J'J)
+  Vector diagonal_;  // diagonal_ =  diag(J'J)
   // Scaled copy of diagonal_. Stored here as optimization to prevent
   // allocations in every iteration and reuse when a step fails and
   // ComputeStep is called again.
diff --git a/internal/ceres/levenberg_marquardt_strategy_test.cc b/internal/ceres/levenberg_marquardt_strategy_test.cc
index f8f06bf..500f269 100644
--- a/internal/ceres/levenberg_marquardt_strategy_test.cc
+++ b/internal/ceres/levenberg_marquardt_strategy_test.cc
@@ -28,9 +28,11 @@
 //
 // Author: sameeragarwal@google.com (Sameer Agarwal)
 
-#include <memory>
-#include "ceres/internal/eigen.h"
 #include "ceres/levenberg_marquardt_strategy.h"
+
+#include <memory>
+
+#include "ceres/internal/eigen.h"
 #include "ceres/linear_solver.h"
 #include "ceres/trust_region_strategy.h"
 #include "glog/logging.h"
@@ -38,11 +40,11 @@
 #include "gmock/mock-log.h"
 #include "gtest/gtest.h"
 
+using testing::_;
 using testing::AllOf;
 using testing::AnyNumber;
 using testing::HasSubstr;
 using testing::ScopedMockLog;
-using testing::_;
 
 namespace ceres {
 namespace internal {
@@ -54,9 +56,7 @@
 class RegularizationCheckingLinearSolver : public DenseSparseMatrixSolver {
  public:
   RegularizationCheckingLinearSolver(const int num_cols, const double* diagonal)
-      : num_cols_(num_cols),
-        diagonal_(diagonal) {
-  }
+      : num_cols_(num_cols), diagonal_(diagonal) {}
 
   virtual ~RegularizationCheckingLinearSolver() {}
 
@@ -152,11 +152,11 @@
 #if defined(_MSC_VER)
     // Use GLOG_WARNING to support MSVC if GLOG_NO_ABBREVIATED_SEVERITIES
     // is defined.
-    EXPECT_CALL(log, Log(GLOG_WARNING, _,
-                         HasSubstr("Failed to compute a step")));
+    EXPECT_CALL(log,
+                Log(GLOG_WARNING, _, HasSubstr("Failed to compute a step")));
 #else
-    EXPECT_CALL(log, Log(google::WARNING, _,
-                         HasSubstr("Failed to compute a step")));
+    EXPECT_CALL(log,
+                Log(google::WARNING, _, HasSubstr("Failed to compute a step")));
 #endif
 
     TrustRegionStrategy::Summary summary =
diff --git a/internal/ceres/line_search.cc b/internal/ceres/line_search.cc
index 352c64f..3f5103f 100644
--- a/internal/ceres/line_search.cc
+++ b/internal/ceres/line_search.cc
@@ -57,10 +57,10 @@
 const int kErrorMessageNumericPrecision = 8;
 }  // namespace
 
-ostream& operator<<(ostream &os, const FunctionSample& sample);
+ostream& operator<<(ostream& os, const FunctionSample& sample);
 
 // Convenience stream operator for pushing FunctionSamples into log messages.
-ostream& operator<<(ostream &os, const FunctionSample& sample) {
+ostream& operator<<(ostream& os, const FunctionSample& sample) {
   os << sample.ToDebugString();
   return os;
 }
@@ -73,17 +73,17 @@
                                string* error) {
   LineSearch* line_search = NULL;
   switch (line_search_type) {
-  case ceres::ARMIJO:
-    line_search = new ArmijoLineSearch(options);
-    break;
-  case ceres::WOLFE:
-    line_search = new WolfeLineSearch(options);
-    break;
-  default:
-    *error = string("Invalid line search algorithm type: ") +
-        LineSearchTypeToString(line_search_type) +
-        string(", unable to create line search.");
-    return NULL;
+    case ceres::ARMIJO:
+      line_search = new ArmijoLineSearch(options);
+      break;
+    case ceres::WOLFE:
+      line_search = new WolfeLineSearch(options);
+      break;
+    default:
+      *error = string("Invalid line search algorithm type: ") +
+               LineSearchTypeToString(line_search_type) +
+               string(", unable to create line search.");
+      return NULL;
   }
   return line_search;
 }
@@ -96,8 +96,7 @@
       initial_evaluator_residual_time_in_seconds(0.0),
       initial_evaluator_jacobian_time_in_seconds(0.0) {}
 
-void LineSearchFunction::Init(const Vector& position,
-                              const Vector& direction) {
+void LineSearchFunction::Init(const Vector& position, const Vector& direction) {
   position_ = position;
   direction_ = direction;
 }
@@ -200,9 +199,9 @@
   summary->polynomial_minimization_time_in_seconds = 0.0;
   options().function->ResetTimeStatistics();
   this->DoSearch(step_size_estimate, initial_cost, initial_gradient, summary);
-  options().function->
-      TimeStatistics(&summary->cost_evaluation_time_in_seconds,
-                     &summary->gradient_evaluation_time_in_seconds);
+  options().function->TimeStatistics(
+      &summary->cost_evaluation_time_in_seconds,
+      &summary->gradient_evaluation_time_in_seconds);
 
   summary->total_time_in_seconds = WallTimeInSeconds() - start_time;
 }
@@ -218,8 +217,7 @@
     const double min_step_size,
     const double max_step_size) const {
   if (!current.value_is_valid ||
-      (interpolation_type == BISECTION &&
-       max_step_size <= current.x)) {
+      (interpolation_type == BISECTION && max_step_size <= current.x)) {
     // Either: sample is invalid; or we are using BISECTION and contracting
     // the step size.
     return std::min(std::max(current.x * 0.5, min_step_size), max_step_size);
@@ -274,8 +272,8 @@
   }
 
   double step_size = 0.0, unused_min_value = 0.0;
-  MinimizeInterpolatingPolynomial(samples, min_step_size, max_step_size,
-                                  &step_size, &unused_min_value);
+  MinimizeInterpolatingPolynomial(
+      samples, min_step_size, max_step_size, &step_size, &unused_min_value);
   return step_size;
 }
 
@@ -315,40 +313,38 @@
 
   function->Evaluate(step_size_estimate, kEvaluateGradient, &current);
   while (!current.value_is_valid ||
-         current.value > (initial_cost
-                          + options().sufficient_decrease
-                          * initial_gradient
-                          * current.x)) {
+         current.value > (initial_cost + options().sufficient_decrease *
+                                             initial_gradient * current.x)) {
     // If current.value_is_valid is false, we treat it as if the cost at that
     // point is not large enough to satisfy the sufficient decrease condition.
     ++summary->num_iterations;
     if (summary->num_iterations >= options().max_num_iterations) {
-      summary->error =
-          StringPrintf("Line search failed: Armijo failed to find a point "
-                       "satisfying the sufficient decrease condition within "
-                       "specified max_num_iterations: %d.",
-                       options().max_num_iterations);
+      summary->error = StringPrintf(
+          "Line search failed: Armijo failed to find a point "
+          "satisfying the sufficient decrease condition within "
+          "specified max_num_iterations: %d.",
+          options().max_num_iterations);
       LOG_IF(WARNING, !options().is_silent) << summary->error;
       return;
     }
 
     const double polynomial_minimization_start_time = WallTimeInSeconds();
-    const double step_size =
-        this->InterpolatingPolynomialMinimizingStepSize(
-            options().interpolation_type,
-            initial_position,
-            previous,
-            current,
-            (options().max_step_contraction * current.x),
-            (options().min_step_contraction * current.x));
+    const double step_size = this->InterpolatingPolynomialMinimizingStepSize(
+        options().interpolation_type,
+        initial_position,
+        previous,
+        current,
+        (options().max_step_contraction * current.x),
+        (options().min_step_contraction * current.x));
     summary->polynomial_minimization_time_in_seconds +=
         (WallTimeInSeconds() - polynomial_minimization_start_time);
 
     if (step_size * descent_direction_max_norm < options().min_step_size) {
-      summary->error =
-          StringPrintf("Line search failed: step_size too small: %.5e "
-                       "with descent_direction_max_norm: %.5e.", step_size,
-                       descent_direction_max_norm);
+      summary->error = StringPrintf(
+          "Line search failed: step_size too small: %.5e "
+          "with descent_direction_max_norm: %.5e.",
+          step_size,
+          descent_direction_max_norm);
       LOG_IF(WARNING, !options().is_silent) << summary->error;
       return;
     }
@@ -435,8 +431,8 @@
   }
 
   VLOG(3) << std::scientific << std::setprecision(kErrorMessageNumericPrecision)
-          << "Starting line search zoom phase with bracket_low: "
-          << bracket_low << ", bracket_high: " << bracket_high
+          << "Starting line search zoom phase with bracket_low: " << bracket_low
+          << ", bracket_high: " << bracket_high
           << ", bracket width: " << fabs(bracket_low.x - bracket_high.x)
           << ", bracket abs delta cost: "
           << fabs(bracket_low.value - bracket_high.value);
@@ -461,11 +457,9 @@
   //          but still has bracket_high.value < initial_position.value.
   //   3. bracket_high is chosen after bracket_low, s.t.
   //      bracket_low.gradient * (bracket_high.x - bracket_low.x) < 0.
-  if (!this->ZoomPhase(initial_position,
-                       bracket_low,
-                       bracket_high,
-                       &solution,
-                       summary) && !solution.value_is_valid) {
+  if (!this->ZoomPhase(
+          initial_position, bracket_low, bracket_high, &solution, summary) &&
+      !solution.value_is_valid) {
     // Failed to find a valid point (given the specified decrease parameters)
     // within the specified bracket.
     return;
@@ -501,20 +495,18 @@
 //
 // Returns false if no step size > minimum step size was found which
 // satisfies at least the Armijo condition.
-bool WolfeLineSearch::BracketingPhase(
-    const FunctionSample& initial_position,
-    const double step_size_estimate,
-    FunctionSample* bracket_low,
-    FunctionSample* bracket_high,
-    bool* do_zoom_search,
-    Summary* summary) const {
+bool WolfeLineSearch::BracketingPhase(const FunctionSample& initial_position,
+                                      const double step_size_estimate,
+                                      FunctionSample* bracket_low,
+                                      FunctionSample* bracket_high,
+                                      bool* do_zoom_search,
+                                      Summary* summary) const {
   LineSearchFunction* function = options().function;
 
   FunctionSample previous = initial_position;
   FunctionSample current;
 
-  const double descent_direction_max_norm =
-      function->DirectionInfinityNorm();
+  const double descent_direction_max_norm = function->DirectionInfinityNorm();
 
   *do_zoom_search = false;
   *bracket_low = initial_position;
@@ -536,10 +528,9 @@
     ++summary->num_iterations;
 
     if (current.value_is_valid &&
-        (current.value > (initial_position.value
-                          + options().sufficient_decrease
-                          * initial_position.gradient
-                          * current.x) ||
+        (current.value > (initial_position.value +
+                          options().sufficient_decrease *
+                              initial_position.gradient * current.x) ||
          (previous.value_is_valid && current.value > previous.value))) {
       // Bracket found: current step size violates Armijo sufficient decrease
       // condition, or has stepped past an inflection point of f() relative to
@@ -556,8 +547,8 @@
     }
 
     if (current.value_is_valid &&
-        fabs(current.gradient) <=
-        -options().sufficient_curvature_decrease * initial_position.gradient) {
+        fabs(current.gradient) <= -options().sufficient_curvature_decrease *
+                                      initial_position.gradient) {
       // Current step size satisfies the strong Wolfe conditions, and is thus a
       // valid termination point, therefore a Zoom not required.
       *bracket_low = current;
@@ -585,8 +576,8 @@
       break;
 
     } else if (current.value_is_valid &&
-               fabs(current.x - previous.x) * descent_direction_max_norm
-               < options().min_step_size) {
+               fabs(current.x - previous.x) * descent_direction_max_norm <
+                   options().min_step_size) {
       // We have shrunk the search bracket to a width less than our tolerance,
       // and still not found either a point satisfying the strong Wolfe
       // conditions, or a valid bracket containing such a point. Stop searching
@@ -595,9 +586,9 @@
       LOG_IF(WARNING, !options().is_silent)
           << "Line search failed: Wolfe bracketing phase shrank "
           << "bracket width: " << fabs(current.x - previous.x)
-          <<  ", to < tolerance: " << options().min_step_size
-          << ", with descent_direction_max_norm: "
-          << descent_direction_max_norm << ", and failed to find "
+          << ", to < tolerance: " << options().min_step_size
+          << ", with descent_direction_max_norm: " << descent_direction_max_norm
+          << ", and failed to find "
           << "a point satisfying the strong Wolfe conditions or a "
           << "bracketing containing such a point. Accepting "
           << "point found satisfying Armijo condition only, to "
@@ -609,18 +600,20 @@
       // Check num iterations bound here so that we always evaluate the
       // max_num_iterations-th iteration against all conditions, and
       // then perform no additional (unused) evaluations.
-      summary->error =
-          StringPrintf("Line search failed: Wolfe bracketing phase failed to "
-                       "find a point satisfying strong Wolfe conditions, or a "
-                       "bracket containing such a point within specified "
-                       "max_num_iterations: %d", options().max_num_iterations);
+      summary->error = StringPrintf(
+          "Line search failed: Wolfe bracketing phase failed to "
+          "find a point satisfying strong Wolfe conditions, or a "
+          "bracket containing such a point within specified "
+          "max_num_iterations: %d",
+          options().max_num_iterations);
       LOG_IF(WARNING, !options().is_silent) << summary->error;
       // Ensure that bracket_low is always set to the step size amongst all
       // those tested which minimizes f() and satisfies the Armijo condition
       // when we terminate due to the 'artificial' max_num_iterations condition.
       *bracket_low =
           current.value_is_valid && current.value < bracket_low->value
-          ? current : *bracket_low;
+              ? current
+              : *bracket_low;
       break;
     }
     // Either: f(current) is invalid; or, f(current) is valid, but does not
@@ -632,17 +625,16 @@
     // size.
     //
     // In Nocedal & Wright [1] (p60), the step-size can only increase in the
-    // bracketing phase: step_size_{k+1} \in [step_size_k, step_size_k * factor].
-    // However this does not account for the function returning invalid values
-    // which we support, in which case we need to contract the step size whilst
-    // ensuring that we do not invert the bracket, i.e, we require that:
+    // bracketing phase: step_size_{k+1} \in [step_size_k, step_size_k *
+    // factor]. However this does not account for the function returning invalid
+    // values which we support, in which case we need to contract the step size
+    // whilst ensuring that we do not invert the bracket, i.e, we require that:
     // step_size_{k-1} <= step_size_{k+1} < step_size_k.
     const double min_step_size =
-        current.value_is_valid
-        ? current.x : previous.x;
+        current.value_is_valid ? current.x : previous.x;
     const double max_step_size =
-        current.value_is_valid
-        ? (current.x * options().max_step_expansion) : current.x;
+        current.value_is_valid ? (current.x * options().max_step_expansion)
+                               : current.x;
 
     // We are performing 2-point interpolation only here, but the API of
     // InterpolatingPolynomialMinimizingStepSize() allows for up to
@@ -652,21 +644,21 @@
     DCHECK(!unused_previous.value_is_valid);
     // Contracts step size if f(current) is not valid.
     const double polynomial_minimization_start_time = WallTimeInSeconds();
-    const double step_size =
-        this->InterpolatingPolynomialMinimizingStepSize(
-            options().interpolation_type,
-            previous,
-            unused_previous,
-            current,
-            min_step_size,
-            max_step_size);
+    const double step_size = this->InterpolatingPolynomialMinimizingStepSize(
+        options().interpolation_type,
+        previous,
+        unused_previous,
+        current,
+        min_step_size,
+        max_step_size);
     summary->polynomial_minimization_time_in_seconds +=
         (WallTimeInSeconds() - polynomial_minimization_start_time);
     if (step_size * descent_direction_max_norm < options().min_step_size) {
-      summary->error =
-          StringPrintf("Line search failed: step_size too small: %.5e "
-                       "with descent_direction_max_norm: %.5e", step_size,
-                       descent_direction_max_norm);
+      summary->error = StringPrintf(
+          "Line search failed: step_size too small: %.5e "
+          "with descent_direction_max_norm: %.5e",
+          step_size,
+          descent_direction_max_norm);
       LOG_IF(WARNING, !options().is_silent) << summary->error;
       return false;
     }
@@ -684,8 +676,8 @@
   // Ensure that even if a valid bracket was found, we will only mark a zoom
   // as required if the bracket's width is greater than our minimum tolerance.
   if (*do_zoom_search &&
-      fabs(bracket_high->x - bracket_low->x) * descent_direction_max_norm
-      < options().min_step_size) {
+      fabs(bracket_high->x - bracket_low->x) * descent_direction_max_norm <
+          options().min_step_size) {
     *do_zoom_search = false;
   }
 
@@ -707,8 +699,7 @@
       << std::scientific << std::setprecision(kErrorMessageNumericPrecision)
       << "Ceres bug: f_low input to Wolfe Zoom invalid, please contact "
       << "the developers!, initial_position: " << initial_position
-      << ", bracket_low: " << bracket_low
-      << ", bracket_high: "<< bracket_high;
+      << ", bracket_low: " << bracket_low << ", bracket_high: " << bracket_high;
   // We do not require bracket_high.gradient_is_valid as the gradient condition
   // for a valid bracket is only dependent upon bracket_low.gradient, and
   // in order to minimize jacobian evaluations, bracket_high.gradient may
@@ -725,8 +716,7 @@
       << std::scientific << std::setprecision(kErrorMessageNumericPrecision)
       << "Ceres bug: f_high input to Wolfe Zoom invalid, please "
       << "contact the developers!, initial_position: " << initial_position
-      << ", bracket_low: " << bracket_low
-      << ", bracket_high: "<< bracket_high;
+      << ", bracket_low: " << bracket_low << ", bracket_high: " << bracket_high;
 
   if (bracket_low.gradient * (bracket_high.x - bracket_low.x) >= 0) {
     // The third condition for a valid initial bracket:
@@ -738,17 +728,17 @@
     // returns inconsistent gradient values relative to the function values,
     // we do not CHECK_LT(), but we do stop processing and return an invalid
     // value.
-    summary->error =
-        StringPrintf("Line search failed: Wolfe zoom phase passed a bracket "
-                     "which does not satisfy: bracket_low.gradient * "
-                     "(bracket_high.x - bracket_low.x) < 0 [%.8e !< 0] "
-                     "with initial_position: %s, bracket_low: %s, bracket_high:"
-                     " %s, the most likely cause of which is the cost function "
-                     "returning inconsistent gradient & function values.",
-                     bracket_low.gradient * (bracket_high.x - bracket_low.x),
-                     initial_position.ToDebugString().c_str(),
-                     bracket_low.ToDebugString().c_str(),
-                     bracket_high.ToDebugString().c_str());
+    summary->error = StringPrintf(
+        "Line search failed: Wolfe zoom phase passed a bracket "
+        "which does not satisfy: bracket_low.gradient * "
+        "(bracket_high.x - bracket_low.x) < 0 [%.8e !< 0] "
+        "with initial_position: %s, bracket_low: %s, bracket_high:"
+        " %s, the most likely cause of which is the cost function "
+        "returning inconsistent gradient & function values.",
+        bracket_low.gradient * (bracket_high.x - bracket_low.x),
+        initial_position.ToDebugString().c_str(),
+        bracket_low.ToDebugString().c_str(),
+        bracket_high.ToDebugString().c_str());
     LOG_IF(WARNING, !options().is_silent) << summary->error;
     solution->value_is_valid = false;
     return false;
@@ -763,24 +753,25 @@
     // not satisfy the Wolfe condition.
     *solution = bracket_low;
     if (summary->num_iterations >= options().max_num_iterations) {
-      summary->error =
-          StringPrintf("Line search failed: Wolfe zoom phase failed to "
-                       "find a point satisfying strong Wolfe conditions "
-                       "within specified max_num_iterations: %d, "
-                       "(num iterations taken for bracketing: %d).",
-                       options().max_num_iterations, num_bracketing_iterations);
+      summary->error = StringPrintf(
+          "Line search failed: Wolfe zoom phase failed to "
+          "find a point satisfying strong Wolfe conditions "
+          "within specified max_num_iterations: %d, "
+          "(num iterations taken for bracketing: %d).",
+          options().max_num_iterations,
+          num_bracketing_iterations);
       LOG_IF(WARNING, !options().is_silent) << summary->error;
       return false;
     }
-    if (fabs(bracket_high.x - bracket_low.x) * descent_direction_max_norm
-        < options().min_step_size) {
+    if (fabs(bracket_high.x - bracket_low.x) * descent_direction_max_norm <
+        options().min_step_size) {
       // Bracket width has been reduced below tolerance, and no point satisfying
       // the strong Wolfe conditions has been found.
-      summary->error =
-          StringPrintf("Line search failed: Wolfe zoom bracket width: %.5e "
-                       "too small with descent_direction_max_norm: %.5e.",
-                       fabs(bracket_high.x - bracket_low.x),
-                       descent_direction_max_norm);
+      summary->error = StringPrintf(
+          "Line search failed: Wolfe zoom bracket width: %.5e "
+          "too small with descent_direction_max_norm: %.5e.",
+          fabs(bracket_high.x - bracket_low.x),
+          descent_direction_max_norm);
       LOG_IF(WARNING, !options().is_silent) << summary->error;
       return false;
     }
@@ -799,14 +790,13 @@
     const FunctionSample unused_previous;
     DCHECK(!unused_previous.value_is_valid);
     const double polynomial_minimization_start_time = WallTimeInSeconds();
-    const double step_size =
-        this->InterpolatingPolynomialMinimizingStepSize(
-            options().interpolation_type,
-            lower_bound_step,
-            unused_previous,
-            upper_bound_step,
-            lower_bound_step.x,
-            upper_bound_step.x);
+    const double step_size = this->InterpolatingPolynomialMinimizingStepSize(
+        options().interpolation_type,
+        lower_bound_step,
+        unused_previous,
+        upper_bound_step,
+        lower_bound_step.x,
+        upper_bound_step.x);
     summary->polynomial_minimization_time_in_seconds +=
         (WallTimeInSeconds() - polynomial_minimization_start_time);
     // No check on magnitude of step size being too small here as it is
@@ -826,12 +816,14 @@
     const bool kEvaluateGradient = true;
     function->Evaluate(step_size, kEvaluateGradient, solution);
     if (!solution->value_is_valid || !solution->gradient_is_valid) {
-      summary->error =
-          StringPrintf("Line search failed: Wolfe Zoom phase found "
-                       "step_size: %.5e, for which function is invalid, "
-                       "between low_step: %.5e and high_step: %.5e "
-                       "at which function is valid.",
-                       solution->x, bracket_low.x, bracket_high.x);
+      summary->error = StringPrintf(
+          "Line search failed: Wolfe Zoom phase found "
+          "step_size: %.5e, for which function is invalid, "
+          "between low_step: %.5e and high_step: %.5e "
+          "at which function is valid.",
+          solution->x,
+          bracket_low.x,
+          bracket_high.x);
       LOG_IF(WARNING, !options().is_silent) << summary->error;
       return false;
     }
@@ -842,10 +834,9 @@
             << ", bracket_high: " << bracket_high
             << ", minimizing solution: " << *solution;
 
-    if ((solution->value > (initial_position.value
-                            + options().sufficient_decrease
-                            * initial_position.gradient
-                            * solution->x)) ||
+    if ((solution->value > (initial_position.value +
+                            options().sufficient_decrease *
+                                initial_position.gradient * solution->x)) ||
         (solution->value >= bracket_low.value)) {
       // Armijo sufficient decrease not satisfied, or not better
       // than current lowest sample, use as new upper bound.
diff --git a/internal/ceres/line_search.h b/internal/ceres/line_search.h
index d59fd77..634c971 100644
--- a/internal/ceres/line_search.h
+++ b/internal/ceres/line_search.h
@@ -35,6 +35,7 @@
 
 #include <string>
 #include <vector>
+
 #include "ceres/function_sample.h"
 #include "ceres/internal/eigen.h"
 #include "ceres/internal/port.h"
diff --git a/internal/ceres/line_search_direction.cc b/internal/ceres/line_search_direction.cc
index 1f9d205..74e9d91 100644
--- a/internal/ceres/line_search_direction.cc
+++ b/internal/ceres/line_search_direction.cc
@@ -29,9 +29,10 @@
 // Author: sameeragarwal@google.com (Sameer Agarwal)
 
 #include "ceres/line_search_direction.h"
+
+#include "ceres/internal/eigen.h"
 #include "ceres/line_search_minimizer.h"
 #include "ceres/low_rank_inverse_hessian.h"
-#include "ceres/internal/eigen.h"
 #include "glog/logging.h"
 
 namespace ceres {
@@ -52,9 +53,7 @@
  public:
   NonlinearConjugateGradient(const NonlinearConjugateGradientType type,
                              const double function_tolerance)
-      : type_(type),
-        function_tolerance_(function_tolerance) {
-  }
+      : type_(type), function_tolerance_(function_tolerance) {}
 
   bool NextDirection(const LineSearchMinimizer::State& previous,
                      const LineSearchMinimizer::State& current,
@@ -72,14 +71,14 @@
         break;
       case HESTENES_STIEFEL:
         gradient_change = current.gradient - previous.gradient;
-        beta =  (current.gradient.dot(gradient_change) /
-                 previous.search_direction.dot(gradient_change));
+        beta = (current.gradient.dot(gradient_change) /
+                previous.search_direction.dot(gradient_change));
         break;
       default:
         LOG(FATAL) << "Unknown nonlinear conjugate gradient type: " << type_;
     }
 
-    *search_direction =  -current.gradient + beta * previous.search_direction;
+    *search_direction = -current.gradient + beta * previous.search_direction;
     const double directional_derivative =
         current.gradient.dot(*search_direction);
     if (directional_derivative > -function_tolerance_) {
@@ -144,8 +143,7 @@
 
 class BFGS : public LineSearchDirection {
  public:
-  BFGS(const int num_parameters,
-       const bool use_approximate_eigenvalue_scaling)
+  BFGS(const int num_parameters, const bool use_approximate_eigenvalue_scaling)
       : num_parameters_(num_parameters),
         use_approximate_eigenvalue_scaling_(use_approximate_eigenvalue_scaling),
         initialized_(false),
@@ -212,8 +210,8 @@
     if (delta_x_dot_delta_gradient <=
         kBFGSSecantConditionHessianUpdateTolerance) {
       VLOG(2) << "Skipping BFGS Update, delta_x_dot_delta_gradient too "
-              << "small: " << delta_x_dot_delta_gradient << ", tolerance: "
-              << kBFGSSecantConditionHessianUpdateTolerance
+              << "small: " << delta_x_dot_delta_gradient
+              << ", tolerance: " << kBFGSSecantConditionHessianUpdateTolerance
               << " (Secant condition).";
     } else {
       // Update dense inverse Hessian approximation.
@@ -300,13 +298,13 @@
 
       // Calculate scalar: (1 + \rho_k * y_k' * H_k * y_k)
       const double delta_x_times_delta_x_transpose_scale_factor =
-          (1.0 + (rho_k * delta_gradient.transpose() *
-                  inverse_hessian_.selfadjointView<Eigen::Lower>() *
-                  delta_gradient));
+          (1.0 +
+           (rho_k * delta_gradient.transpose() *
+            inverse_hessian_.selfadjointView<Eigen::Lower>() * delta_gradient));
       // Calculate: B = (1 + \rho_k * y_k' * H_k * y_k) * s_k * s_k'
       Matrix B = Matrix::Zero(num_parameters_, num_parameters_);
-      B.selfadjointView<Eigen::Lower>().
-          rankUpdate(delta_x, delta_x_times_delta_x_transpose_scale_factor);
+      B.selfadjointView<Eigen::Lower>().rankUpdate(
+          delta_x, delta_x_times_delta_x_transpose_scale_factor);
 
       // Finally, update inverse Hessian approximation according to:
       // H_k = H_{k-1} + \rho_k * (B - (A + A')).  Note that (A + A') is
@@ -315,9 +313,8 @@
           rho_k * (B - A - A.transpose());
     }
 
-    *search_direction =
-        inverse_hessian_.selfadjointView<Eigen::Lower>() *
-        (-1.0 * current.gradient);
+    *search_direction = inverse_hessian_.selfadjointView<Eigen::Lower>() *
+                        (-1.0 * current.gradient);
 
     if (search_direction->dot(current.gradient) >= 0.0) {
       LOG(WARNING) << "Numerical failure in BFGS update: inverse Hessian "
@@ -339,16 +336,15 @@
   bool is_positive_definite_;
 };
 
-LineSearchDirection*
-LineSearchDirection::Create(const LineSearchDirection::Options& options) {
+LineSearchDirection* LineSearchDirection::Create(
+    const LineSearchDirection::Options& options) {
   if (options.type == STEEPEST_DESCENT) {
     return new SteepestDescent;
   }
 
   if (options.type == NONLINEAR_CONJUGATE_GRADIENT) {
     return new NonlinearConjugateGradient(
-        options.nonlinear_conjugate_gradient_type,
-        options.function_tolerance);
+        options.nonlinear_conjugate_gradient_type, options.function_tolerance);
   }
 
   if (options.type == ceres::LBFGS) {
diff --git a/internal/ceres/line_search_direction.h b/internal/ceres/line_search_direction.h
index 467578d..2fcf472 100644
--- a/internal/ceres/line_search_direction.h
+++ b/internal/ceres/line_search_direction.h
@@ -47,8 +47,7 @@
           nonlinear_conjugate_gradient_type(FLETCHER_REEVES),
           function_tolerance(1e-12),
           max_lbfgs_rank(20),
-          use_approximate_eigenvalue_bfgs_scaling(true) {
-    }
+          use_approximate_eigenvalue_bfgs_scaling(true) {}
 
     int num_parameters;
     LineSearchDirectionType type;
diff --git a/internal/ceres/line_search_minimizer.cc b/internal/ceres/line_search_minimizer.cc
index 931f56c..5fed73c 100644
--- a/internal/ceres/line_search_minimizer.cc
+++ b/internal/ceres/line_search_minimizer.cc
@@ -41,8 +41,8 @@
 #include "ceres/line_search_minimizer.h"
 
 #include <algorithm>
-#include <cstdlib>
 #include <cmath>
+#include <cstdlib>
 #include <memory>
 #include <string>
 #include <vector>
@@ -88,7 +88,7 @@
                                    Solver::Summary* summary) {
   const bool is_not_silent = !options.is_silent;
   double start_time = WallTimeInSeconds();
-  double iteration_start_time =  start_time;
+  double iteration_start_time = start_time;
 
   CHECK(options.evaluator != nullptr);
   Evaluator* evaluator = options.evaluator.get();
@@ -129,8 +129,9 @@
 
   if (!EvaluateGradientNorms(evaluator, x, &current_state, &summary->message)) {
     summary->termination_type = FAILURE;
-    summary->message = "Initial cost and jacobian evaluation failed. "
-        "More details: " + summary->message;
+    summary->message =
+        "Initial cost and jacobian evaluation failed. More details: " +
+        summary->message;
     LOG_IF(WARNING, is_not_silent) << "Terminating: " << summary->message;
     return;
   }
@@ -141,10 +142,10 @@
   iteration_summary.gradient_norm = sqrt(current_state.gradient_squared_norm);
   iteration_summary.gradient_max_norm = current_state.gradient_max_norm;
   if (iteration_summary.gradient_max_norm <= options.gradient_tolerance) {
-    summary->message = StringPrintf("Gradient tolerance reached. "
-                                    "Gradient max norm: %e <= %e",
-                                    iteration_summary.gradient_max_norm,
-                                    options.gradient_tolerance);
+    summary->message =
+        StringPrintf("Gradient tolerance reached. Gradient max norm: %e <= %e",
+                     iteration_summary.gradient_max_norm,
+                     options.gradient_tolerance);
     summary->termination_type = CONVERGENCE;
     VLOG_IF(1, is_not_silent) << "Terminating: " << summary->message;
     return;
@@ -153,8 +154,7 @@
   iteration_summary.iteration_time_in_seconds =
       WallTimeInSeconds() - iteration_start_time;
   iteration_summary.cumulative_time_in_seconds =
-      WallTimeInSeconds() - start_time
-      + summary->preprocessor_time_in_seconds;
+      WallTimeInSeconds() - start_time + summary->preprocessor_time_in_seconds;
   summary->iterations.push_back(iteration_summary);
 
   LineSearchDirection::Options line_search_direction_options;
@@ -189,10 +189,8 @@
   line_search_options.is_silent = options.is_silent;
   line_search_options.function = &line_search_function;
 
-  std::unique_ptr<LineSearch>
-      line_search(LineSearch::Create(options.line_search_type,
-                                     line_search_options,
-                                     &summary->message));
+  std::unique_ptr<LineSearch> line_search(LineSearch::Create(
+      options.line_search_type, line_search_options, &summary->message));
   if (line_search.get() == nullptr) {
     summary->termination_type = FAILURE;
     LOG_IF(ERROR, is_not_silent) << "Terminating: " << summary->message;
@@ -216,7 +214,7 @@
     }
 
     const double total_solver_time = iteration_start_time - start_time +
-        summary->preprocessor_time_in_seconds;
+                                     summary->preprocessor_time_in_seconds;
     if (total_solver_time >= options.max_solver_time_in_seconds) {
       summary->message = "Maximum solver time reached.";
       summary->termination_type = NO_CONVERGENCE;
@@ -234,21 +232,19 @@
       current_state.search_direction = -current_state.gradient;
     } else {
       line_search_status = line_search_direction->NextDirection(
-          previous_state,
-          current_state,
-          &current_state.search_direction);
+          previous_state, current_state, &current_state.search_direction);
     }
 
     if (!line_search_status &&
         num_line_search_direction_restarts >=
-        options.max_num_line_search_direction_restarts) {
+            options.max_num_line_search_direction_restarts) {
       // Line search direction failed to generate a new direction, and we
       // have already reached our specified maximum number of restarts,
       // terminate optimization.
-      summary->message =
-          StringPrintf("Line search direction failure: specified "
-                       "max_num_line_search_direction_restarts: %d reached.",
-                       options.max_num_line_search_direction_restarts);
+      summary->message = StringPrintf(
+          "Line search direction failure: specified "
+          "max_num_line_search_direction_restarts: %d reached.",
+          options.max_num_line_search_direction_restarts);
       summary->termination_type = FAILURE;
       LOG_IF(WARNING, is_not_silent) << "Terminating: " << summary->message;
       break;
@@ -261,14 +257,12 @@
       ++num_line_search_direction_restarts;
       LOG_IF(WARNING, is_not_silent)
           << "Line search direction algorithm: "
-          << LineSearchDirectionTypeToString(
-              options.line_search_direction_type)
+          << LineSearchDirectionTypeToString(options.line_search_direction_type)
           << ", failed to produce a valid new direction at "
           << "iteration: " << iteration_summary.iteration
           << ". Restarting, number of restarts: "
           << num_line_search_direction_restarts << " / "
-          << options.max_num_line_search_direction_restarts
-          << " [max].";
+          << options.max_num_line_search_direction_restarts << " [max].";
       line_search_direction.reset(
           LineSearchDirection::Create(line_search_direction_options));
       current_state.search_direction = -current_state.gradient;
@@ -286,19 +280,21 @@
     // iteration.
     const double initial_step_size =
         (iteration_summary.iteration == 1 || !line_search_status)
-        ? std::min(1.0, 1.0 / current_state.gradient_max_norm)
-        : std::min(1.0, 2.0 * (current_state.cost - previous_state.cost) /
-                   current_state.directional_derivative);
+            ? std::min(1.0, 1.0 / current_state.gradient_max_norm)
+            : std::min(1.0,
+                       2.0 * (current_state.cost - previous_state.cost) /
+                           current_state.directional_derivative);
     // By definition, we should only ever go forwards along the specified search
     // direction in a line search, most likely cause for this being violated
     // would be a numerical failure in the line search direction calculation.
     if (initial_step_size < 0.0) {
-      summary->message =
-          StringPrintf("Numerical failure in line search, initial_step_size is "
-                       "negative: %.5e, directional_derivative: %.5e, "
-                       "(current_cost - previous_cost): %.5e",
-                       initial_step_size, current_state.directional_derivative,
-                       (current_state.cost - previous_state.cost));
+      summary->message = StringPrintf(
+          "Numerical failure in line search, initial_step_size is "
+          "negative: %.5e, directional_derivative: %.5e, "
+          "(current_cost - previous_cost): %.5e",
+          initial_step_size,
+          current_state.directional_derivative,
+          (current_state.cost - previous_state.cost));
       summary->termination_type = FAILURE;
       LOG_IF(WARNING, is_not_silent) << "Terminating: " << summary->message;
       break;
@@ -309,13 +305,14 @@
                         current_state.directional_derivative,
                         &line_search_summary);
     if (!line_search_summary.success) {
-      summary->message =
-          StringPrintf("Numerical failure in line search, failed to find "
-                       "a valid step size, (did not run out of iterations) "
-                       "using initial_step_size: %.5e, initial_cost: %.5e, "
-                       "initial_gradient: %.5e.",
-                       initial_step_size, current_state.cost,
-                       current_state.directional_derivative);
+      summary->message = StringPrintf(
+          "Numerical failure in line search, failed to find "
+          "a valid step size, (did not run out of iterations) "
+          "using initial_step_size: %.5e, initial_cost: %.5e, "
+          "initial_gradient: %.5e.",
+          initial_step_size,
+          current_state.cost,
+          current_state.directional_derivative);
       LOG_IF(WARNING, is_not_silent) << "Terminating: " << summary->message;
       summary->termination_type = FAILURE;
       break;
@@ -373,7 +370,7 @@
 
     iteration_summary.step_is_valid = true;
     iteration_summary.step_is_successful = true;
-    iteration_summary.step_size =  current_state.step_size;
+    iteration_summary.step_size = current_state.step_size;
     iteration_summary.line_search_function_evaluations =
         line_search_summary.num_function_evaluations;
     iteration_summary.line_search_gradient_evaluations =
@@ -383,8 +380,8 @@
     iteration_summary.iteration_time_in_seconds =
         WallTimeInSeconds() - iteration_start_time;
     iteration_summary.cumulative_time_in_seconds =
-        WallTimeInSeconds() - start_time
-        + summary->preprocessor_time_in_seconds;
+        WallTimeInSeconds() - start_time +
+        summary->preprocessor_time_in_seconds;
     summary->iterations.push_back(iteration_summary);
 
     // Iterations inside the line search algorithm are considered
@@ -393,7 +390,7 @@
     // minimizer. The number of line search steps is the total number
     // of inner line search iterations (or steps) across the entire
     // minimization.
-    summary->num_line_search_steps +=  line_search_summary.num_iterations;
+    summary->num_line_search_steps += line_search_summary.num_iterations;
     summary->line_search_cost_evaluation_time_in_seconds +=
         line_search_summary.cost_evaluation_time_in_seconds;
     summary->line_search_gradient_evaluation_time_in_seconds +=
@@ -404,25 +401,26 @@
         line_search_summary.total_time_in_seconds;
     ++summary->num_successful_steps;
 
-    const double step_size_tolerance = options.parameter_tolerance *
-                                       (x_norm + options.parameter_tolerance);
+    const double step_size_tolerance =
+        options.parameter_tolerance * (x_norm + options.parameter_tolerance);
     if (iteration_summary.step_norm <= step_size_tolerance) {
-      summary->message =
-          StringPrintf("Parameter tolerance reached. "
-                       "Relative step_norm: %e <= %e.",
-                       (iteration_summary.step_norm /
-                        (x_norm + options.parameter_tolerance)),
-                       options.parameter_tolerance);
+      summary->message = StringPrintf(
+          "Parameter tolerance reached. "
+          "Relative step_norm: %e <= %e.",
+          (iteration_summary.step_norm /
+           (x_norm + options.parameter_tolerance)),
+          options.parameter_tolerance);
       summary->termination_type = CONVERGENCE;
       VLOG_IF(1, is_not_silent) << "Terminating: " << summary->message;
       return;
     }
 
     if (iteration_summary.gradient_max_norm <= options.gradient_tolerance) {
-      summary->message = StringPrintf("Gradient tolerance reached. "
-                                      "Gradient max norm: %e <= %e",
-                                      iteration_summary.gradient_max_norm,
-                                      options.gradient_tolerance);
+      summary->message = StringPrintf(
+          "Gradient tolerance reached. "
+          "Gradient max norm: %e <= %e",
+          iteration_summary.gradient_max_norm,
+          options.gradient_tolerance);
       summary->termination_type = CONVERGENCE;
       VLOG_IF(1, is_not_silent) << "Terminating: " << summary->message;
       break;
diff --git a/internal/ceres/line_search_minimizer.h b/internal/ceres/line_search_minimizer.h
index 191128a..79e8dc9 100644
--- a/internal/ceres/line_search_minimizer.h
+++ b/internal/ceres/line_search_minimizer.h
@@ -31,10 +31,10 @@
 #ifndef CERES_INTERNAL_LINE_SEARCH_MINIMIZER_H_
 #define CERES_INTERNAL_LINE_SEARCH_MINIMIZER_H_
 
+#include "ceres/internal/eigen.h"
 #include "ceres/minimizer.h"
 #include "ceres/solver.h"
 #include "ceres/types.h"
-#include "ceres/internal/eigen.h"
 #include "glog/logging.h"
 
 namespace ceres {
@@ -46,15 +46,13 @@
 class LineSearchMinimizer : public Minimizer {
  public:
   struct State {
-    State(int num_parameters,
-          int num_effective_parameters)
+    State(int num_parameters, int num_effective_parameters)
         : cost(0.0),
           gradient(num_effective_parameters),
           gradient_squared_norm(0.0),
           search_direction(num_effective_parameters),
           directional_derivative(0.0),
-          step_size(0.0) {
-    }
+          step_size(0.0) {}
 
     double cost;
     Vector gradient;
diff --git a/internal/ceres/line_search_minimizer_test.cc b/internal/ceres/line_search_minimizer_test.cc
index 86623b0..2ef27b9 100644
--- a/internal/ceres/line_search_minimizer_test.cc
+++ b/internal/ceres/line_search_minimizer_test.cc
@@ -43,7 +43,6 @@
   bool Evaluate(const double* parameters,
                 double* cost,
                 double* gradient) const final {
-
     cost[0] = parameters[0] * parameters[0];
     if (gradient != NULL) {
       gradient[0] = 2.0 * parameters[0];
diff --git a/internal/ceres/line_search_preprocessor.cc b/internal/ceres/line_search_preprocessor.cc
index 5a21809..6a69425 100644
--- a/internal/ceres/line_search_preprocessor.cc
+++ b/internal/ceres/line_search_preprocessor.cc
@@ -32,6 +32,7 @@
 
 #include <numeric>
 #include <string>
+
 #include "ceres/casts.h"
 #include "ceres/context_impl.h"
 #include "ceres/evaluator.h"
@@ -62,16 +63,14 @@
   pp->evaluator_options.context = pp->problem->context();
   pp->evaluator_options.evaluation_callback =
       pp->reduced_program->mutable_evaluation_callback();
-  pp->evaluator.reset(Evaluator::Create(pp->evaluator_options,
-                                        pp->reduced_program.get(),
-                                        &pp->error));
+  pp->evaluator.reset(Evaluator::Create(
+      pp->evaluator_options, pp->reduced_program.get(), &pp->error));
   return (pp->evaluator.get() != NULL);
 }
 
 }  // namespace
 
-LineSearchPreprocessor::~LineSearchPreprocessor() {
-}
+LineSearchPreprocessor::~LineSearchPreprocessor() {}
 
 bool LineSearchPreprocessor::Preprocess(const Solver::Options& options,
                                         ProblemImpl* problem,
@@ -86,10 +85,8 @@
     return false;
   }
 
-  pp->reduced_program.reset(
-      program->CreateReducedProgram(&pp->removed_parameter_blocks,
-                                    &pp->fixed_cost,
-                                    &pp->error));
+  pp->reduced_program.reset(program->CreateReducedProgram(
+      &pp->removed_parameter_blocks, &pp->fixed_cost, &pp->error));
 
   if (pp->reduced_program.get() == NULL) {
     return false;
diff --git a/internal/ceres/line_search_preprocessor_test.cc b/internal/ceres/line_search_preprocessor_test.cc
index 301509c..68860c5 100644
--- a/internal/ceres/line_search_preprocessor_test.cc
+++ b/internal/ceres/line_search_preprocessor_test.cc
@@ -28,9 +28,10 @@
 //
 // Author: sameeragarwal@google.com (Sameer Agarwal)
 
+#include "ceres/line_search_preprocessor.h"
+
 #include <map>
 
-#include "ceres/line_search_preprocessor.h"
 #include "ceres/problem_impl.h"
 #include "ceres/sized_cost_function.h"
 #include "ceres/solver.h"
diff --git a/internal/ceres/linear_least_squares_problems.cc b/internal/ceres/linear_least_squares_problems.cc
index 7c523d3..299051c 100644
--- a/internal/ceres/linear_least_squares_problems.cc
+++ b/internal/ceres/linear_least_squares_problems.cc
@@ -101,7 +101,7 @@
 
   int counter = 0;
   for (int i = 0; i < 3; ++i) {
-    for (int j = 0; j< 2; ++j) {
+    for (int j = 0; j < 2; ++j) {
       Ai[counter] = i;
       Aj[counter] = j;
       ++counter;
@@ -132,7 +132,6 @@
   return problem;
 }
 
-
 /*
       A = [1 0  | 2 0 0
            3 0  | 0 4 0
@@ -187,9 +186,8 @@
   int num_cols = 5;
 
   LinearLeastSquaresProblem* problem = new LinearLeastSquaresProblem;
-  TripletSparseMatrix* A = new TripletSparseMatrix(num_rows,
-                                                   num_cols,
-                                                   num_rows * num_cols);
+  TripletSparseMatrix* A =
+      new TripletSparseMatrix(num_rows, num_cols, num_rows * num_cols);
   problem->b.reset(new double[num_rows]);
   problem->D.reset(new double[num_cols]);
   problem->num_eliminate_blocks = 2;
@@ -404,7 +402,6 @@
   return problem;
 }
 
-
 /*
       A = [1 0
            3 0
@@ -620,8 +617,7 @@
   LOG(INFO) << "A^T: \n" << AA.transpose();
 
   if (D != NULL) {
-    LOG(INFO) << "A's appended diagonal:\n"
-              << ConstVectorRef(D, A->num_cols());
+    LOG(INFO) << "A's appended diagonal:\n" << ConstVectorRef(D, A->num_cols());
   }
 
   if (b != NULL) {
@@ -659,10 +655,8 @@
   string matlab_script;
   StringAppendF(&matlab_script,
                 "function lsqp = load_trust_region_problem()\n");
-  StringAppendF(&matlab_script,
-                "lsqp.num_rows = %d;\n", A->num_rows());
-  StringAppendF(&matlab_script,
-                "lsqp.num_cols = %d;\n", A->num_cols());
+  StringAppendF(&matlab_script, "lsqp.num_rows = %d;\n", A->num_rows());
+  StringAppendF(&matlab_script, "lsqp.num_cols = %d;\n", A->num_cols());
 
   {
     string filename = filename_base + "_A.txt";
@@ -670,8 +664,8 @@
     CHECK(fptr != nullptr);
     A->ToTextFile(fptr);
     fclose(fptr);
-    StringAppendF(&matlab_script,
-                  "tmp = load('%s', '-ascii');\n", filename.c_str());
+    StringAppendF(
+        &matlab_script, "tmp = load('%s', '-ascii');\n", filename.c_str());
     StringAppendF(
         &matlab_script,
         "lsqp.A = sparse(tmp(:, 1) + 1, tmp(:, 2) + 1, tmp(:, 3), %d, %d);\n",
@@ -679,26 +673,25 @@
         A->num_cols());
   }
 
-
   if (D != NULL) {
     string filename = filename_base + "_D.txt";
     WriteArrayToFileOrDie(filename, D, A->num_cols());
-    StringAppendF(&matlab_script,
-                  "lsqp.D = load('%s', '-ascii');\n", filename.c_str());
+    StringAppendF(
+        &matlab_script, "lsqp.D = load('%s', '-ascii');\n", filename.c_str());
   }
 
   if (b != NULL) {
     string filename = filename_base + "_b.txt";
     WriteArrayToFileOrDie(filename, b, A->num_rows());
-    StringAppendF(&matlab_script,
-                  "lsqp.b = load('%s', '-ascii');\n", filename.c_str());
+    StringAppendF(
+        &matlab_script, "lsqp.b = load('%s', '-ascii');\n", filename.c_str());
   }
 
   if (x != NULL) {
     string filename = filename_base + "_x.txt";
     WriteArrayToFileOrDie(filename, x, A->num_cols());
-    StringAppendF(&matlab_script,
-                  "lsqp.x = load('%s', '-ascii');\n", filename.c_str());
+    StringAppendF(
+        &matlab_script, "lsqp.x = load('%s', '-ascii');\n", filename.c_str());
   }
 
   string matlab_filename = filename_base + ".m";
@@ -716,12 +709,11 @@
                                    int num_eliminate_blocks) {
   switch (dump_format_type) {
     case CONSOLE:
-      return DumpLinearLeastSquaresProblemToConsole(A, D, b, x,
-                                                    num_eliminate_blocks);
+      return DumpLinearLeastSquaresProblemToConsole(
+          A, D, b, x, num_eliminate_blocks);
     case TEXTFILE:
-      return DumpLinearLeastSquaresProblemToTextFile(filename_base,
-                                                     A, D, b, x,
-                                                     num_eliminate_blocks);
+      return DumpLinearLeastSquaresProblemToTextFile(
+          filename_base, A, D, b, x, num_eliminate_blocks);
     default:
       LOG(FATAL) << "Unknown DumpFormatType " << dump_format_type;
   }
diff --git a/internal/ceres/linear_least_squares_problems.h b/internal/ceres/linear_least_squares_problems.h
index 5dfcd34..939c2b4 100644
--- a/internal/ceres/linear_least_squares_problems.h
+++ b/internal/ceres/linear_least_squares_problems.h
@@ -34,8 +34,9 @@
 #include <memory>
 #include <string>
 #include <vector>
-#include "ceres/sparse_matrix.h"
+
 #include "ceres/internal/port.h"
+#include "ceres/sparse_matrix.h"
 
 namespace ceres {
 namespace internal {
@@ -43,9 +44,7 @@
 // Structure defining a linear least squares problem and if possible
 // ground truth solutions. To be used by various LinearSolver tests.
 struct LinearLeastSquaresProblem {
-  LinearLeastSquaresProblem()
-      : num_eliminate_blocks(0) {
-  }
+  LinearLeastSquaresProblem() : num_eliminate_blocks(0) {}
 
   std::unique_ptr<SparseMatrix> A;
   std::unique_ptr<double[]> b;
diff --git a/internal/ceres/linear_operator.cc b/internal/ceres/linear_operator.cc
index 9d291bd..548c724 100644
--- a/internal/ceres/linear_operator.cc
+++ b/internal/ceres/linear_operator.cc
@@ -33,8 +33,7 @@
 namespace ceres {
 namespace internal {
 
-LinearOperator::~LinearOperator() {
-}
+LinearOperator::~LinearOperator() {}
 
 }  // namespace internal
 }  // namespace ceres
diff --git a/internal/ceres/linear_solver.cc b/internal/ceres/linear_solver.cc
index 107af6a..6cae248 100644
--- a/internal/ceres/linear_solver.cc
+++ b/internal/ceres/linear_solver.cc
@@ -33,9 +33,9 @@
 #include "ceres/cgnr_solver.h"
 #include "ceres/dense_normal_cholesky_solver.h"
 #include "ceres/dense_qr_solver.h"
+#include "ceres/dynamic_sparse_normal_cholesky_solver.h"
 #include "ceres/iterative_schur_complement_solver.h"
 #include "ceres/schur_complement_solver.h"
-#include "ceres/dynamic_sparse_normal_cholesky_solver.h"
 #include "ceres/sparse_normal_cholesky_solver.h"
 #include "ceres/types.h"
 #include "glog/logging.h"
@@ -43,8 +43,7 @@
 namespace ceres {
 namespace internal {
 
-LinearSolver::~LinearSolver() {
-}
+LinearSolver::~LinearSolver() {}
 
 LinearSolverType LinearSolver::LinearSolverForZeroEBlocks(
     LinearSolverType linear_solver_type) {
@@ -112,8 +111,7 @@
       return new DenseNormalCholeskySolver(options);
 
     default:
-      LOG(FATAL) << "Unknown linear solver type :"
-                 << options.type;
+      LOG(FATAL) << "Unknown linear solver type :" << options.type;
       return NULL;  // MSVC doesn't understand that LOG(FATAL) never returns.
   }
 }
diff --git a/internal/ceres/linear_solver.h b/internal/ceres/linear_solver.h
index cb624b3..47684e7 100644
--- a/internal/ceres/linear_solver.h
+++ b/internal/ceres/linear_solver.h
@@ -38,6 +38,7 @@
 #include <map>
 #include <string>
 #include <vector>
+
 #include "ceres/block_sparse_matrix.h"
 #include "ceres/casts.h"
 #include "ceres/compressed_row_sparse_matrix.h"
@@ -74,11 +75,11 @@
 // algebra library should use before computing a sparse factorization
 // (usually Cholesky).
 enum OrderingType {
-  NATURAL, // Do not re-order the matrix. This is useful when the
-           // matrix has been ordered using a fill-reducing ordering
-           // already.
-  AMD      // Use the Approximate Minimum Degree algorithm to re-order
-           // the matrix.
+  NATURAL,  // Do not re-order the matrix. This is useful when the
+            // matrix has been ordered using a fill-reducing ordering
+            // already.
+  AMD       // Use the Approximate Minimum Degree algorithm to re-order
+            // the matrix.
 };
 
 class LinearOperator;
@@ -215,7 +216,6 @@
     // used a preconditioner.
     LinearOperator* preconditioner = nullptr;
 
-
     // The following tolerance related options only makes sense for
     // iterative solvers. Direct solvers ignore them.
 
@@ -329,10 +329,12 @@
 
 // Linear solvers that depend on acccess to the low level structure of
 // a SparseMatrix.
+// clang-format off
 typedef TypedLinearSolver<BlockSparseMatrix>         BlockSparseMatrixSolver;          // NOLINT
 typedef TypedLinearSolver<CompressedRowSparseMatrix> CompressedRowSparseMatrixSolver;  // NOLINT
 typedef TypedLinearSolver<DenseSparseMatrix>         DenseSparseMatrixSolver;          // NOLINT
 typedef TypedLinearSolver<TripletSparseMatrix>       TripletSparseMatrixSolver;        // NOLINT
+// clang-format on
 
 }  // namespace internal
 }  // namespace ceres
diff --git a/internal/ceres/loss_function.cc b/internal/ceres/loss_function.cc
index 2c21a73..353f29a 100644
--- a/internal/ceres/loss_function.cc
+++ b/internal/ceres/loss_function.cc
@@ -52,7 +52,7 @@
     const double r = sqrt(s);
     rho[0] = 2.0 * a_ * r - b_;
     rho[1] = std::max(std::numeric_limits<double>::min(), a_ / r);
-    rho[2] = - rho[1] / (2.0 * s);
+    rho[2] = -rho[1] / (2.0 * s);
   } else {
     // Inlier region.
     rho[0] = s;
@@ -67,7 +67,7 @@
   // 'sum' and 'tmp' are always positive, assuming that 's' is.
   rho[0] = 2.0 * b_ * (tmp - 1.0);
   rho[1] = std::max(std::numeric_limits<double>::min(), 1.0 / tmp);
-  rho[2] = - (c_ * rho[1]) / (2.0 * sum);
+  rho[2] = -(c_ * rho[1]) / (2.0 * sum);
 }
 
 void CauchyLoss::Evaluate(double s, double rho[3]) const {
@@ -76,7 +76,7 @@
   // 'sum' and 'inv' are always positive, assuming that 's' is.
   rho[0] = b_ * log(sum);
   rho[1] = std::max(std::numeric_limits<double>::min(), inv);
-  rho[2] = - c_ * (inv * inv);
+  rho[2] = -c_ * (inv * inv);
 }
 
 void ArctanLoss::Evaluate(double s, double rho[3]) const {
@@ -89,9 +89,7 @@
 }
 
 TolerantLoss::TolerantLoss(double a, double b)
-    : a_(a),
-      b_(b),
-      c_(b * log(1.0 + exp(-a / b))) {
+    : a_(a), b_(b), c_(b * log(1.0 + exp(-a / b))) {
   CHECK_GE(a, 0.0);
   CHECK_GT(b, 0.0);
 }
@@ -133,12 +131,11 @@
   }
 }
 
-ComposedLoss::ComposedLoss(const LossFunction* f, Ownership ownership_f,
-                           const LossFunction* g, Ownership ownership_g)
-    : f_(f),
-      g_(g),
-      ownership_f_(ownership_f),
-      ownership_g_(ownership_g) {
+ComposedLoss::ComposedLoss(const LossFunction* f,
+                           Ownership ownership_f,
+                           const LossFunction* g,
+                           Ownership ownership_g)
+    : f_(f), g_(g), ownership_f_(ownership_f), ownership_g_(ownership_g) {
   CHECK(f_ != nullptr);
   CHECK(g_ != nullptr);
 }
diff --git a/internal/ceres/loss_function_test.cc b/internal/ceres/loss_function_test.cc
index 6302dbe..638c0c9 100644
--- a/internal/ceres/loss_function_test.cc
+++ b/internal/ceres/loss_function_test.cc
@@ -66,7 +66,7 @@
   ASSERT_NEAR(fd_1, rho[1], 1e-6);
 
   // Second derivative.
-  const double fd_2 = (fwd[0] - 2*rho[0] + bwd[0]) / (kH * kH);
+  const double fd_2 = (fwd[0] - 2 * rho[0] + bwd[0]) / (kH * kH);
   ASSERT_NEAR(fd_2, rho[2], 1e-6);
 }
 }  // namespace
@@ -219,15 +219,16 @@
     AssertLossFunctionIsValid(scaled_loss, 1.792);
   }
   {
-    ScaledLoss scaled_loss(
-        new TolerantLoss(1.3, 0.1), 10, TAKE_OWNERSHIP);
+    ScaledLoss scaled_loss(new TolerantLoss(1.3, 0.1), 10, TAKE_OWNERSHIP);
     AssertLossFunctionIsValid(scaled_loss, 1.792);
   }
   {
-    ScaledLoss scaled_loss(
-        new ComposedLoss(
-            new HuberLoss(0.8), TAKE_OWNERSHIP,
-            new TolerantLoss(1.3, 0.5), TAKE_OWNERSHIP), 10, TAKE_OWNERSHIP);
+    ScaledLoss scaled_loss(new ComposedLoss(new HuberLoss(0.8),
+                                            TAKE_OWNERSHIP,
+                                            new TolerantLoss(1.3, 0.5),
+                                            TAKE_OWNERSHIP),
+                           10,
+                           TAKE_OWNERSHIP);
     AssertLossFunctionIsValid(scaled_loss, 1.792);
   }
 }
@@ -235,8 +236,7 @@
 TEST(LossFunction, LossFunctionWrapper) {
   // Initialization
   HuberLoss loss_function1(1.0);
-  LossFunctionWrapper loss_function_wrapper(new HuberLoss(1.0),
-                                            TAKE_OWNERSHIP);
+  LossFunctionWrapper loss_function_wrapper(new HuberLoss(1.0), TAKE_OWNERSHIP);
 
   double s = 0.862;
   double rho_gold[3];
@@ -281,7 +281,6 @@
   for (int i = 0; i < 3; ++i) {
     EXPECT_NEAR(rho[i], rho_gold[i], 1e-12);
   }
-
 }
 
 }  // namespace internal
diff --git a/internal/ceres/low_rank_inverse_hessian.cc b/internal/ceres/low_rank_inverse_hessian.cc
index f3953c4..c73e5db 100644
--- a/internal/ceres/low_rank_inverse_hessian.cc
+++ b/internal/ceres/low_rank_inverse_hessian.cc
@@ -28,10 +28,11 @@
 //
 // Author: sameeragarwal@google.com (Sameer Agarwal)
 
+#include "ceres/low_rank_inverse_hessian.h"
+
 #include <list>
 
 #include "ceres/internal/eigen.h"
-#include "ceres/low_rank_inverse_hessian.h"
 #include "glog/logging.h"
 
 namespace ceres {
@@ -84,8 +85,7 @@
       approximate_eigenvalue_scale_(1.0),
       delta_x_history_(num_parameters, max_num_corrections),
       delta_gradient_history_(num_parameters, max_num_corrections),
-      delta_x_dot_delta_gradient_(max_num_corrections) {
-}
+      delta_x_dot_delta_gradient_(max_num_corrections) {}
 
 bool LowRankInverseHessian::Update(const Vector& delta_x,
                                    const Vector& delta_gradient) {
@@ -93,13 +93,12 @@
   if (delta_x_dot_delta_gradient <=
       kLBFGSSecantConditionHessianUpdateTolerance) {
     VLOG(2) << "Skipping L-BFGS Update, delta_x_dot_delta_gradient too "
-            << "small: " << delta_x_dot_delta_gradient << ", tolerance: "
-            << kLBFGSSecantConditionHessianUpdateTolerance
+            << "small: " << delta_x_dot_delta_gradient
+            << ", tolerance: " << kLBFGSSecantConditionHessianUpdateTolerance
             << " (Secant condition).";
     return false;
   }
 
-
   int next = indices_.size();
   // Once the size of the list reaches max_num_corrections_, simulate
   // a circular buffer by removing the first element of the list and
@@ -132,7 +131,7 @@
        it != indices_.rend();
        ++it) {
     const double alpha_i = delta_x_history_.col(*it).dot(search_direction) /
-        delta_x_dot_delta_gradient_(*it);
+                           delta_x_dot_delta_gradient_(*it);
     search_direction -= alpha_i * delta_gradient_history_.col(*it);
     alpha(*it) = alpha_i;
   }
@@ -177,7 +176,7 @@
 
   for (const int i : indices_) {
     const double beta = delta_gradient_history_.col(i).dot(search_direction) /
-        delta_x_dot_delta_gradient_(i);
+                        delta_x_dot_delta_gradient_(i);
     search_direction += delta_x_history_.col(i) * (alpha(i) - beta);
   }
 }
diff --git a/internal/ceres/map_util.h b/internal/ceres/map_util.h
index f55aee3..6e310f8 100644
--- a/internal/ceres/map_util.h
+++ b/internal/ceres/map_util.h
@@ -34,6 +34,7 @@
 #define CERES_INTERNAL_MAP_UTIL_H_
 
 #include <utility>
+
 #include "ceres/internal/port.h"
 #include "glog/logging.h"
 
@@ -55,9 +56,9 @@
 // This version assumes the key is printable, and includes it in the fatal log
 // message.
 template <class Collection>
-const typename Collection::value_type::second_type&
-FindOrDie(const Collection& collection,
-          const typename Collection::value_type::first_type& key) {
+const typename Collection::value_type::second_type& FindOrDie(
+    const Collection& collection,
+    const typename Collection::value_type::first_type& key) {
   typename Collection::const_iterator it = collection.find(key);
   CHECK(it != collection.end()) << "Map key not found: " << key;
   return it->second;
@@ -67,10 +68,10 @@
 // If the key is present in the map then the value associated with that
 // key is returned, otherwise the value passed as a default is returned.
 template <class Collection>
-const typename Collection::value_type::second_type
-FindWithDefault(const Collection& collection,
-                const typename Collection::value_type::first_type& key,
-                const typename Collection::value_type::second_type& value) {
+const typename Collection::value_type::second_type FindWithDefault(
+    const Collection& collection,
+    const typename Collection::value_type::first_type& key,
+    const typename Collection::value_type::second_type& value) {
   typename Collection::const_iterator it = collection.find(key);
   if (it == collection.end()) {
     return value;
@@ -84,7 +85,7 @@
 // took place, false indicates the key was already present.
 template <class Collection>
 bool InsertIfNotPresent(
-    Collection * const collection,
+    Collection* const collection,
     const typename Collection::value_type::first_type& key,
     const typename Collection::value_type::second_type& value) {
   std::pair<typename Collection::iterator, bool> ret =
@@ -96,9 +97,9 @@
 // Same as above but the returned pointer is not const and can be used to change
 // the stored value.
 template <class Collection>
-typename Collection::value_type::second_type*
-FindOrNull(Collection& collection,  // NOLINT
-           const typename Collection::value_type::first_type& key) {
+typename Collection::value_type::second_type* FindOrNull(
+    Collection& collection,  // NOLINT
+    const typename Collection::value_type::first_type& key) {
   typename Collection::iterator it = collection.find(key);
   if (it == collection.end()) {
     return 0;
@@ -116,13 +117,13 @@
 
 // Inserts a new key/value into a map or hash_map.
 // Dies if the key is already present.
-template<class Collection>
+template <class Collection>
 void InsertOrDie(Collection* const collection,
                  const typename Collection::value_type::first_type& key,
                  const typename Collection::value_type::second_type& data) {
   typedef typename Collection::value_type value_type;
   CHECK(collection->insert(value_type(key, data)).second)
-    << "duplicate key: " << key;
+      << "duplicate key: " << key;
 }
 
 }  // namespace ceres
diff --git a/internal/ceres/miniglog/glog/logging.cc b/internal/ceres/miniglog/glog/logging.cc
index 372ecb0..0863f61 100644
--- a/internal/ceres/miniglog/glog/logging.cc
+++ b/internal/ceres/miniglog/glog/logging.cc
@@ -34,6 +34,6 @@
 
 // This is the set of log sinks. This must be in a separate library to ensure
 // that there is only one instance of this across the entire program.
-std::set<google::LogSink *> log_sinks_global;
+std::set<google::LogSink*> log_sinks_global;
 
-}  // namespace ceres
+}  // namespace google
diff --git a/internal/ceres/miniglog/glog/logging.h b/internal/ceres/miniglog/glog/logging.h
index 0fdf382..b442d08 100644
--- a/internal/ceres/miniglog/glog/logging.h
+++ b/internal/ceres/miniglog/glog/logging.h
@@ -93,7 +93,7 @@
 #define CERCES_INTERNAL_MINIGLOG_GLOG_LOGGING_H_
 
 #ifdef ANDROID
-#  include <android/log.h>
+#include <android/log.h>
 #endif  // ANDROID
 
 #include <algorithm>
@@ -106,24 +106,28 @@
 #include <vector>
 
 // For appropriate definition of CERES_EXPORT macro.
-#include "ceres/internal/port.h"
 #include "ceres/internal/disable_warnings.h"
+#include "ceres/internal/port.h"
 
 // Log severity level constants.
+// clang-format off
 const int FATAL   = -3;
 const int ERROR   = -2;
 const int WARNING = -1;
 const int INFO    =  0;
+// clang-format on
 
 // ------------------------- Glog compatibility ------------------------------
 
 namespace google {
 
 typedef int LogSeverity;
+// clang-format off
 const int INFO    = ::INFO;
 const int WARNING = ::WARNING;
 const int ERROR   = ::ERROR;
 const int FATAL   = ::FATAL;
+// clang-format on
 
 // Sink class used for integration with mock and test functions. If sinks are
 // added, all log output is also sent to each sink through the send function.
@@ -143,20 +147,18 @@
 };
 
 // Global set of log sinks. The actual object is defined in logging.cc.
-extern CERES_EXPORT std::set<LogSink *> log_sinks_global;
+extern CERES_EXPORT std::set<LogSink*> log_sinks_global;
 
-inline void InitGoogleLogging(char *argv) {
+inline void InitGoogleLogging(char* argv) {
   // Do nothing; this is ignored.
 }
 
 // Note: the Log sink functions are not thread safe.
-inline void AddLogSink(LogSink *sink) {
+inline void AddLogSink(LogSink* sink) {
   // TODO(settinger): Add locks for thread safety.
   log_sinks_global.insert(sink);
 }
-inline void RemoveLogSink(LogSink *sink) {
-  log_sinks_global.erase(sink);
-}
+inline void RemoveLogSink(LogSink* sink) { log_sinks_global.erase(sink); }
 
 }  // namespace google
 
@@ -170,8 +172,8 @@
 // use of the log macros LG, LOG, or VLOG.
 class CERES_EXPORT MessageLogger {
  public:
-  MessageLogger(const char *file, int line, const char *tag, int severity)
-    : file_(file), line_(line), tag_(tag), severity_(severity) {
+  MessageLogger(const char* file, int line, const char* tag, int severity)
+      : file_(file), line_(line), tag_(tag), severity_(severity) {
     // Pre-pend the stream with the file and line number.
     StripBasename(std::string(file), &filename_only_);
     stream_ << filename_only_ << ":" << line << " ";
@@ -193,8 +195,8 @@
 
     // Bound the logging level.
     const int kMaxVerboseLevel = 2;
-    int android_level_index = std::min(std::max(FATAL, severity_),
-                                       kMaxVerboseLevel) - FATAL;
+    int android_level_index =
+        std::min(std::max(FATAL, severity_), kMaxVerboseLevel) - FATAL;
     int android_log_level = android_log_levels[android_level_index];
 
     // Output the log string the Android log at the appropriate level.
@@ -202,9 +204,7 @@
 
     // Indicate termination if needed.
     if (severity_ == FATAL) {
-      __android_log_write(ANDROID_LOG_FATAL,
-                          tag_.c_str(),
-                          "terminating.\n");
+      __android_log_write(ANDROID_LOG_FATAL, tag_.c_str(), "terminating.\n");
     }
 #else
     // If not building on Android, log all output to std::cerr.
@@ -222,12 +222,12 @@
   }
 
   // Return the stream associated with the logger object.
-  std::stringstream &stream() { return stream_; }
+  std::stringstream& stream() { return stream_; }
 
  private:
   void LogToSinks(int severity) {
     time_t rawtime;
-    time (&rawtime);
+    time(&rawtime);
 
     struct tm timeinfo;
 #if defined(WIN32) || defined(_WIN32) || defined(__WIN32__)
@@ -241,24 +241,31 @@
     std::set<google::LogSink*>::iterator iter;
     // Send the log message to all sinks.
     for (iter = google::log_sinks_global.begin();
-         iter != google::log_sinks_global.end(); ++iter) {
-      (*iter)->send(severity, file_.c_str(), filename_only_.c_str(), line_,
-                    &timeinfo, stream_.str().c_str(), stream_.str().size());
+         iter != google::log_sinks_global.end();
+         ++iter) {
+      (*iter)->send(severity,
+                    file_.c_str(),
+                    filename_only_.c_str(),
+                    line_,
+                    &timeinfo,
+                    stream_.str().c_str(),
+                    stream_.str().size());
     }
   }
 
   void WaitForSinks() {
     // TODO(settinger): Add locks for thread safety.
-    std::set<google::LogSink *>::iterator iter;
+    std::set<google::LogSink*>::iterator iter;
 
     // Call WaitTillSent() for all sinks.
     for (iter = google::log_sinks_global.begin();
-         iter != google::log_sinks_global.end(); ++iter) {
+         iter != google::log_sinks_global.end();
+         ++iter) {
       (*iter)->WaitTillSent();
     }
   }
 
-  void StripBasename(const std::string &full_path, std::string *filename) {
+  void StripBasename(const std::string& full_path, std::string* filename) {
     // TODO(settinger): Add support for OSs with different path separators.
     const char kSeparator = '/';
     size_t pos = full_path.rfind(kSeparator);
@@ -284,16 +291,18 @@
 // is not used" and "statement has no effect".
 class CERES_EXPORT LoggerVoidify {
  public:
-  LoggerVoidify() { }
+  LoggerVoidify() {}
   // This has to be an operator with a precedence lower than << but
   // higher than ?:
-  void operator&(const std::ostream &s) { }
+  void operator&(const std::ostream& s) {}
 };
 
 // Log only if condition is met.  Otherwise evaluates to void.
+// clang-format off
 #define LOG_IF(severity, condition) \
     !(condition) ? (void) 0 : LoggerVoidify() & \
       MessageLogger((char *)__FILE__, __LINE__, "native", severity).stream()
+// clang-format on
 
 // Log only if condition is NOT met.  Otherwise evaluates to void.
 #define LOG_IF_FALSE(severity, condition) LOG_IF(severity, !(condition))
@@ -301,6 +310,7 @@
 // LG is a convenient shortcut for LOG(INFO). Its use is in new
 // google3 code is discouraged and the following shortcut exists for
 // backward compatibility with existing code.
+// clang-format off
 #ifdef MAX_LOG_LEVEL
 #  define LOG(n)  LOG_IF(n, n <= MAX_LOG_LEVEL)
 #  define VLOG(n) LOG_IF(n, n <= MAX_LOG_LEVEL)
@@ -326,37 +336,39 @@
 #  define DLOG(severity) true ? (void) 0 : LoggerVoidify() & \
       MessageLogger((char *)__FILE__, __LINE__, "native", severity).stream()
 #endif
-
+// clang-format on
 
 // Log a message and terminate.
-template<class T>
-void LogMessageFatal(const char *file, int line, const T &message) {
-  MessageLogger((char *)__FILE__, __LINE__, "native", FATAL).stream()
-      << message;
+template <class T>
+void LogMessageFatal(const char* file, int line, const T& message) {
+  MessageLogger((char*)__FILE__, __LINE__, "native", FATAL).stream() << message;
 }
 
 // ---------------------------- CHECK macros ---------------------------------
 
 // Check for a given boolean condition.
-#define CHECK(condition) LOG_IF_FALSE(FATAL, condition) \
-        << "Check failed: " #condition " "
+#define CHECK(condition) \
+  LOG_IF_FALSE(FATAL, condition) << "Check failed: " #condition " "
 
 #ifndef NDEBUG
 // Debug only version of CHECK
-#  define DCHECK(condition) LOG_IF_FALSE(FATAL, condition) \
-          << "Check failed: " #condition " "
+#define DCHECK(condition) \
+  LOG_IF_FALSE(FATAL, condition) << "Check failed: " #condition " "
 #else
 // Optimized version - generates no code.
-#  define DCHECK(condition) if (false) LOG_IF_FALSE(FATAL, condition) \
-          << "Check failed: " #condition " "
+#define DCHECK(condition) \
+  if (false) LOG_IF_FALSE(FATAL, condition) << "Check failed: " #condition " "
 #endif  // NDEBUG
 
 // ------------------------- CHECK_OP macros ---------------------------------
 
 // Generic binary operator check macro. This should not be directly invoked,
 // instead use the binary comparison macros defined below.
-#define CHECK_OP(val1, val2, op) LOG_IF_FALSE(FATAL, ((val1) op (val2))) \
-  << "Check failed: " #val1 " " #op " " #val2 " "
+#define CHECK_OP(val1, val2, op)        \
+  LOG_IF_FALSE(FATAL, ((val1)op(val2))) \
+      << "Check failed: " #val1 " " #op " " #val2 " "
+
+// clang-format off
 
 // Check_op macro definitions
 #define CHECK_EQ(val1, val2) CHECK_OP(val1, val2, ==)
@@ -384,12 +396,14 @@
 #  define DCHECK_GT(val1, val2) if (false) CHECK_OP(val1, val2, >)
 #endif  // NDEBUG
 
+// clang-format on
+
 // ---------------------------CHECK_NOTNULL macros ---------------------------
 
 // Helpers for CHECK_NOTNULL(). Two are necessary to support both raw pointers
 // and smart pointers.
 template <typename T>
-T& CheckNotNullCommon(const char *file, int line, const char *names, T& t) {
+T& CheckNotNullCommon(const char* file, int line, const char* names, T& t) {
   if (t == NULL) {
     LogMessageFatal(file, line, std::string(names));
   }
@@ -397,12 +411,12 @@
 }
 
 template <typename T>
-T* CheckNotNull(const char *file, int line, const char *names, T* t) {
+T* CheckNotNull(const char* file, int line, const char* names, T* t) {
   return CheckNotNullCommon(file, line, names, t);
 }
 
 template <typename T>
-T& CheckNotNull(const char *file, int line, const char *names, T& t) {
+T& CheckNotNull(const char* file, int line, const char* names, T& t) {
   return CheckNotNullCommon(file, line, names, t);
 }
 
@@ -416,7 +430,8 @@
   CheckNotNull(__FILE__, __LINE__, "'" #val "' Must be non NULL", (val))
 #else
 // Optimized version - generates no code.
-#define DCHECK_NOTNULL(val) if (false)\
+#define DCHECK_NOTNULL(val) \
+  if (false)                \
   CheckNotNull(__FILE__, __LINE__, "'" #val "' Must be non NULL", (val))
 #endif  // NDEBUG
 
diff --git a/internal/ceres/minimizer.cc b/internal/ceres/minimizer.cc
index f596033..4943a75 100644
--- a/internal/ceres/minimizer.cc
+++ b/internal/ceres/minimizer.cc
@@ -28,8 +28,9 @@
 //
 // Author: sameeragarwal@google.com (Sameer Agarwal)
 
-#include "ceres/line_search_minimizer.h"
 #include "ceres/minimizer.h"
+
+#include "ceres/line_search_minimizer.h"
 #include "ceres/trust_region_minimizer.h"
 #include "ceres/types.h"
 #include "glog/logging.h"
@@ -50,7 +51,6 @@
   return NULL;
 }
 
-
 Minimizer::~Minimizer() {}
 
 bool Minimizer::RunCallbacks(const Minimizer::Options& options,
diff --git a/internal/ceres/minimizer.h b/internal/ceres/minimizer.h
index afdd60d..69416b3 100644
--- a/internal/ceres/minimizer.h
+++ b/internal/ceres/minimizer.h
@@ -34,6 +34,7 @@
 #include <memory>
 #include <string>
 #include <vector>
+
 #include "ceres/internal/port.h"
 #include "ceres/iteration_callback.h"
 #include "ceres/solver.h"
@@ -54,13 +55,9 @@
   // see solver.h for detailed information about the meaning and
   // default values of each of these parameters.
   struct Options {
-    Options() {
-      Init(Solver::Options());
-    }
+    Options() { Init(Solver::Options()); }
 
-    explicit Options(const Solver::Options& options) {
-      Init(options);
-    }
+    explicit Options(const Solver::Options& options) { Init(options); }
 
     void Init(const Solver::Options& options) {
       num_threads = options.num_threads;
@@ -92,8 +89,7 @@
       max_lbfgs_rank = options.max_lbfgs_rank;
       use_approximate_eigenvalue_bfgs_scaling =
           options.use_approximate_eigenvalue_bfgs_scaling;
-      line_search_interpolation_type =
-          options.line_search_interpolation_type;
+      line_search_interpolation_type = options.line_search_interpolation_type;
       min_line_search_step_size = options.min_line_search_step_size;
       line_search_sufficient_function_decrease =
           options.line_search_sufficient_function_decrease;
@@ -107,8 +103,7 @@
           options.max_num_line_search_direction_restarts;
       line_search_sufficient_curvature_decrease =
           options.line_search_sufficient_curvature_decrease;
-      max_line_search_step_expansion =
-          options.max_line_search_step_expansion;
+      max_line_search_step_expansion = options.max_line_search_step_expansion;
       inner_iteration_tolerance = options.inner_iteration_tolerance;
       is_silent = (options.logging_type == SILENT);
       is_constrained = false;
diff --git a/internal/ceres/minimizer_test.cc b/internal/ceres/minimizer_test.cc
index b0a2d96..3de4abe 100644
--- a/internal/ceres/minimizer_test.cc
+++ b/internal/ceres/minimizer_test.cc
@@ -28,10 +28,11 @@
 //
 // Author: keir@google.com (Keir Mierle)
 
-#include "gtest/gtest.h"
-#include "ceres/iteration_callback.h"
 #include "ceres/minimizer.h"
+
+#include "ceres/iteration_callback.h"
 #include "ceres/solver.h"
+#include "gtest/gtest.h"
 
 namespace ceres {
 namespace internal {
diff --git a/internal/ceres/normal_prior.cc b/internal/ceres/normal_prior.cc
index a3d5d8e..4a62132 100644
--- a/internal/ceres/normal_prior.cc
+++ b/internal/ceres/normal_prior.cc
@@ -32,14 +32,14 @@
 
 #include <cstddef>
 #include <vector>
+
 #include "ceres/internal/eigen.h"
 #include "ceres/types.h"
 #include "glog/logging.h"
 
 namespace ceres {
 
-NormalPrior::NormalPrior(const Matrix& A, const Vector& b)
-    : A_(A), b_(b) {
+NormalPrior::NormalPrior(const Matrix& A, const Vector& b) : A_(A), b_(b) {
   CHECK_GT(b_.rows(), 0);
   CHECK_GT(A_.rows(), 0);
   CHECK_EQ(b_.rows(), A.cols());
diff --git a/internal/ceres/normal_prior_test.cc b/internal/ceres/normal_prior_test.cc
index 9abbf7f..518c18e 100644
--- a/internal/ceres/normal_prior_test.cc
+++ b/internal/ceres/normal_prior_test.cc
@@ -32,9 +32,9 @@
 
 #include <cstddef>
 
-#include "gtest/gtest.h"
 #include "ceres/internal/eigen.h"
 #include "ceres/random.h"
+#include "gtest/gtest.h"
 
 namespace ceres {
 namespace internal {
@@ -42,8 +42,7 @@
 namespace {
 
 void RandomVector(Vector* v) {
-  for (int r = 0; r < v->rows(); ++r)
-    (*v)[r] = 2 * RandDouble() - 1;
+  for (int r = 0; r < v->rows(); ++r) (*v)[r] = 2 * RandDouble() - 1;
 }
 
 void RandomMatrix(Matrix* m) {
@@ -67,11 +66,10 @@
       Matrix A(num_rows, num_cols);
       RandomMatrix(&A);
 
-      double * x = new double[num_cols];
-      for (int i = 0; i < num_cols; ++i)
-        x[i] = 2 * RandDouble() - 1;
+      double* x = new double[num_cols];
+      for (int i = 0; i < num_cols; ++i) x[i] = 2 * RandDouble() - 1;
 
-      double * jacobian = new double[num_rows * num_cols];
+      double* jacobian = new double[num_rows * num_cols];
       Vector residuals(num_rows);
 
       NormalPrior prior(A, b);
@@ -87,8 +85,8 @@
       double jacobian_diff_norm = (J - A).norm();
       EXPECT_NEAR(jacobian_diff_norm, 0.0, 1e-10);
 
-      delete []x;
-      delete []jacobian;
+      delete[] x;
+      delete[] jacobian;
     }
   }
 }
@@ -104,9 +102,8 @@
       Matrix A(num_rows, num_cols);
       RandomMatrix(&A);
 
-      double * x = new double[num_cols];
-      for (int i = 0; i < num_cols; ++i)
-        x[i] = 2 * RandDouble() - 1;
+      double* x = new double[num_cols];
+      for (int i = 0; i < num_cols; ++i) x[i] = 2 * RandDouble() - 1;
 
       double* jacobians[1];
       jacobians[0] = NULL;
@@ -127,8 +124,7 @@
           (residuals - A * (VectorRef(x, num_cols) - b)).squaredNorm();
       EXPECT_NEAR(residual_diff_norm, 0, 1e-10);
 
-
-      delete []x;
+      delete[] x;
     }
   }
 }
diff --git a/internal/ceres/numeric_diff_cost_function_test.cc b/internal/ceres/numeric_diff_cost_function_test.cc
index 20cf1b2..a5f7a15 100644
--- a/internal/ceres/numeric_diff_cost_function_test.cc
+++ b/internal/ceres/numeric_diff_cost_function_test.cc
@@ -50,39 +50,36 @@
 
 TEST(NumericDiffCostFunction, EasyCaseFunctorCentralDifferences) {
   std::unique_ptr<CostFunction> cost_function;
-  cost_function.reset(
-      new NumericDiffCostFunction<EasyFunctor,
-                                  CENTRAL,
-                                  3,  /* number of residuals */
-                                  5,  /* size of x1 */
-                                  5   /* size of x2 */>(
-          new EasyFunctor));
+  cost_function.reset(new NumericDiffCostFunction<EasyFunctor,
+                                                  CENTRAL,
+                                                  3,  // number of residuals
+                                                  5,  // size of x1
+                                                  5   // size of x2
+                                                  >(new EasyFunctor));
   EasyFunctor functor;
   functor.ExpectCostFunctionEvaluationIsNearlyCorrect(*cost_function, CENTRAL);
 }
 
 TEST(NumericDiffCostFunction, EasyCaseFunctorForwardDifferences) {
   std::unique_ptr<CostFunction> cost_function;
-  cost_function.reset(
-      new NumericDiffCostFunction<EasyFunctor,
-                                  FORWARD,
-                                  3,  /* number of residuals */
-                                  5,  /* size of x1 */
-                                  5   /* size of x2 */>(
-          new EasyFunctor));
+  cost_function.reset(new NumericDiffCostFunction<EasyFunctor,
+                                                  FORWARD,
+                                                  3,  // number of residuals
+                                                  5,  // size of x1
+                                                  5   // size of x2
+                                                  >(new EasyFunctor));
   EasyFunctor functor;
   functor.ExpectCostFunctionEvaluationIsNearlyCorrect(*cost_function, FORWARD);
 }
 
 TEST(NumericDiffCostFunction, EasyCaseFunctorRidders) {
   std::unique_ptr<CostFunction> cost_function;
-  cost_function.reset(
-      new NumericDiffCostFunction<EasyFunctor,
-                                  RIDDERS,
-                                  3,  /* number of residuals */
-                                  5,  /* size of x1 */
-                                  5   /* size of x2 */>(
-          new EasyFunctor));
+  cost_function.reset(new NumericDiffCostFunction<EasyFunctor,
+                                                  RIDDERS,
+                                                  3,  // number of residuals
+                                                  5,  // size of x1
+                                                  5   // size of x2
+                                                  >(new EasyFunctor));
   EasyFunctor functor;
   functor.ExpectCostFunctionEvaluationIsNearlyCorrect(*cost_function, RIDDERS);
 }
@@ -92,10 +89,10 @@
   cost_function.reset(
       new NumericDiffCostFunction<EasyCostFunction,
                                   CENTRAL,
-                                  3,  /* number of residuals */
-                                  5,  /* size of x1 */
-                                  5   /* size of x2 */>(
-          new EasyCostFunction, TAKE_OWNERSHIP));
+                                  3,  // number of residuals
+                                  5,  // size of x1
+                                  5   // size of x2
+                                  >(new EasyCostFunction, TAKE_OWNERSHIP));
   EasyFunctor functor;
   functor.ExpectCostFunctionEvaluationIsNearlyCorrect(*cost_function, CENTRAL);
 }
@@ -105,10 +102,10 @@
   cost_function.reset(
       new NumericDiffCostFunction<EasyCostFunction,
                                   FORWARD,
-                                  3,  /* number of residuals */
-                                  5,  /* size of x1 */
-                                  5   /* size of x2 */>(
-          new EasyCostFunction, TAKE_OWNERSHIP));
+                                  3,  // number of residuals
+                                  5,  // size of x1
+                                  5   // size of x2
+                                  >(new EasyCostFunction, TAKE_OWNERSHIP));
   EasyFunctor functor;
   functor.ExpectCostFunctionEvaluationIsNearlyCorrect(*cost_function, FORWARD);
 }
@@ -118,44 +115,39 @@
   cost_function.reset(
       new NumericDiffCostFunction<EasyCostFunction,
                                   RIDDERS,
-                                  3,  /* number of residuals */
-                                  5,  /* size of x1 */
-                                  5   /* size of x2 */>(
-          new EasyCostFunction, TAKE_OWNERSHIP));
+                                  3,  // number of residuals
+                                  5,  // size of x1
+                                  5   // size of x2
+                                  >(new EasyCostFunction, TAKE_OWNERSHIP));
   EasyFunctor functor;
   functor.ExpectCostFunctionEvaluationIsNearlyCorrect(*cost_function, RIDDERS);
 }
 
-TEST(NumericDiffCostFunction,
-     TranscendentalCaseFunctorCentralDifferences) {
+TEST(NumericDiffCostFunction, TranscendentalCaseFunctorCentralDifferences) {
   std::unique_ptr<CostFunction> cost_function;
-  cost_function.reset(
-      new NumericDiffCostFunction<TranscendentalFunctor,
-                                  CENTRAL,
-                                  2,  /* number of residuals */
-                                  5,  /* size of x1 */
-                                  5   /* size of x2 */>(
-          new TranscendentalFunctor));
+  cost_function.reset(new NumericDiffCostFunction<TranscendentalFunctor,
+                                                  CENTRAL,
+                                                  2,  // number of residuals
+                                                  5,  // size of x1
+                                                  5   // size of x2
+                                                  >(new TranscendentalFunctor));
   TranscendentalFunctor functor;
   functor.ExpectCostFunctionEvaluationIsNearlyCorrect(*cost_function, CENTRAL);
 }
 
-TEST(NumericDiffCostFunction,
-     TranscendentalCaseFunctorForwardDifferences) {
+TEST(NumericDiffCostFunction, TranscendentalCaseFunctorForwardDifferences) {
   std::unique_ptr<CostFunction> cost_function;
-  cost_function.reset(
-      new NumericDiffCostFunction<TranscendentalFunctor,
-                                  FORWARD,
-                                  2,  /* number of residuals */
-                                  5,  /* size of x1 */
-                                  5   /* size of x2 */>(
-          new TranscendentalFunctor));
+  cost_function.reset(new NumericDiffCostFunction<TranscendentalFunctor,
+                                                  FORWARD,
+                                                  2,  // number of residuals
+                                                  5,  // size of x1
+                                                  5   // size of x2
+                                                  >(new TranscendentalFunctor));
   TranscendentalFunctor functor;
   functor.ExpectCostFunctionEvaluationIsNearlyCorrect(*cost_function, FORWARD);
 }
 
-TEST(NumericDiffCostFunction,
-     TranscendentalCaseFunctorRidders) {
+TEST(NumericDiffCostFunction, TranscendentalCaseFunctorRidders) {
   NumericDiffOptions options;
 
   // Using a smaller initial step size to overcome oscillatory function
@@ -163,13 +155,13 @@
   options.ridders_relative_initial_step_size = 1e-3;
 
   std::unique_ptr<CostFunction> cost_function;
-  cost_function.reset(
-      new NumericDiffCostFunction<TranscendentalFunctor,
-                                  RIDDERS,
-                                  2,  /* number of residuals */
-                                  5,  /* size of x1 */
-                                  5   /* size of x2 */>(
-          new TranscendentalFunctor, TAKE_OWNERSHIP, 2, options));
+  cost_function.reset(new NumericDiffCostFunction<TranscendentalFunctor,
+                                                  RIDDERS,
+                                                  2,  // number of residuals
+                                                  5,  // size of x1
+                                                  5   // size of x2
+                                                  >(
+      new TranscendentalFunctor, TAKE_OWNERSHIP, 2, options));
   TranscendentalFunctor functor;
   functor.ExpectCostFunctionEvaluationIsNearlyCorrect(*cost_function, RIDDERS);
 }
@@ -177,13 +169,13 @@
 TEST(NumericDiffCostFunction,
      TranscendentalCaseCostFunctionCentralDifferences) {
   std::unique_ptr<CostFunction> cost_function;
-  cost_function.reset(
-      new NumericDiffCostFunction<TranscendentalCostFunction,
-                                  CENTRAL,
-                                  2,  /* number of residuals */
-                                  5,  /* size of x1 */
-                                  5   /* size of x2 */>(
-          new TranscendentalCostFunction, TAKE_OWNERSHIP));
+  cost_function.reset(new NumericDiffCostFunction<TranscendentalCostFunction,
+                                                  CENTRAL,
+                                                  2,  // number of residuals
+                                                  5,  // size of x1
+                                                  5   // size of x2
+                                                  >(
+      new TranscendentalCostFunction, TAKE_OWNERSHIP));
   TranscendentalFunctor functor;
   functor.ExpectCostFunctionEvaluationIsNearlyCorrect(*cost_function, CENTRAL);
 }
@@ -191,19 +183,18 @@
 TEST(NumericDiffCostFunction,
      TranscendentalCaseCostFunctionForwardDifferences) {
   std::unique_ptr<CostFunction> cost_function;
-  cost_function.reset(
-      new NumericDiffCostFunction<TranscendentalCostFunction,
-                                  FORWARD,
-                                  2,  /* number of residuals */
-                                  5,  /* size of x1 */
-                                  5   /* size of x2 */>(
-          new TranscendentalCostFunction, TAKE_OWNERSHIP));
+  cost_function.reset(new NumericDiffCostFunction<TranscendentalCostFunction,
+                                                  FORWARD,
+                                                  2,  // number of residuals
+                                                  5,  // size of x1
+                                                  5   // size of x2
+                                                  >(
+      new TranscendentalCostFunction, TAKE_OWNERSHIP));
   TranscendentalFunctor functor;
   functor.ExpectCostFunctionEvaluationIsNearlyCorrect(*cost_function, FORWARD);
 }
 
-TEST(NumericDiffCostFunction,
-     TranscendentalCaseCostFunctionRidders) {
+TEST(NumericDiffCostFunction, TranscendentalCaseCostFunctionRidders) {
   NumericDiffOptions options;
 
   // Using a smaller initial step size to overcome oscillatory function
@@ -211,18 +202,18 @@
   options.ridders_relative_initial_step_size = 1e-3;
 
   std::unique_ptr<CostFunction> cost_function;
-  cost_function.reset(
-      new NumericDiffCostFunction<TranscendentalCostFunction,
-                                  RIDDERS,
-                                  2,  /* number of residuals */
-                                  5,  /* size of x1 */
-                                  5   /* size of x2 */>(
-          new TranscendentalCostFunction, TAKE_OWNERSHIP, 2, options));
+  cost_function.reset(new NumericDiffCostFunction<TranscendentalCostFunction,
+                                                  RIDDERS,
+                                                  2,  // number of residuals
+                                                  5,  // size of x1
+                                                  5   // size of x2
+                                                  >(
+      new TranscendentalCostFunction, TAKE_OWNERSHIP, 2, options));
   TranscendentalFunctor functor;
   functor.ExpectCostFunctionEvaluationIsNearlyCorrect(*cost_function, RIDDERS);
 }
 
-template<int num_rows, int num_cols>
+template <int num_rows, int num_cols>
 class SizeTestingCostFunction : public SizedCostFunction<num_rows, num_cols> {
  public:
   bool Evaluate(double const* const* parameters,
@@ -241,20 +232,20 @@
 TEST(NumericDiffCostFunction, EigenRowMajorColMajorTest) {
   std::unique_ptr<CostFunction> cost_function;
   cost_function.reset(
-      new NumericDiffCostFunction<SizeTestingCostFunction<1,1>,  CENTRAL, 1, 1>(
-          new SizeTestingCostFunction<1,1>, ceres::TAKE_OWNERSHIP));
+      new NumericDiffCostFunction<SizeTestingCostFunction<1, 1>, CENTRAL, 1, 1>(
+          new SizeTestingCostFunction<1, 1>, ceres::TAKE_OWNERSHIP));
 
   cost_function.reset(
-      new NumericDiffCostFunction<SizeTestingCostFunction<2,1>,  CENTRAL, 2, 1>(
-          new SizeTestingCostFunction<2,1>, ceres::TAKE_OWNERSHIP));
+      new NumericDiffCostFunction<SizeTestingCostFunction<2, 1>, CENTRAL, 2, 1>(
+          new SizeTestingCostFunction<2, 1>, ceres::TAKE_OWNERSHIP));
 
   cost_function.reset(
-      new NumericDiffCostFunction<SizeTestingCostFunction<1,2>,  CENTRAL, 1, 2>(
-          new SizeTestingCostFunction<1,2>, ceres::TAKE_OWNERSHIP));
+      new NumericDiffCostFunction<SizeTestingCostFunction<1, 2>, CENTRAL, 1, 2>(
+          new SizeTestingCostFunction<1, 2>, ceres::TAKE_OWNERSHIP));
 
   cost_function.reset(
-      new NumericDiffCostFunction<SizeTestingCostFunction<2,2>,  CENTRAL, 2, 2>(
-          new SizeTestingCostFunction<2,2>, ceres::TAKE_OWNERSHIP));
+      new NumericDiffCostFunction<SizeTestingCostFunction<2, 2>, CENTRAL, 2, 2>(
+          new SizeTestingCostFunction<2, 2>, ceres::TAKE_OWNERSHIP));
 
   cost_function.reset(
       new NumericDiffCostFunction<EasyFunctor, CENTRAL, ceres::DYNAMIC, 1, 1>(
@@ -288,21 +279,20 @@
       new NumericDiffCostFunction<EasyFunctor,
                                   CENTRAL,
                                   ceres::DYNAMIC,
-                                  5,  /* size of x1 */
-                                  5   /* size of x2 */>(
-                                      new EasyFunctor, TAKE_OWNERSHIP, 3));
+                                  5,  // size of x1
+                                  5   // size of x2
+                                  >(new EasyFunctor, TAKE_OWNERSHIP, 3));
   EasyFunctor functor;
   functor.ExpectCostFunctionEvaluationIsNearlyCorrect(*cost_function, CENTRAL);
 }
 
 TEST(NumericDiffCostFunction, ExponentialFunctorRidders) {
   std::unique_ptr<CostFunction> cost_function;
-  cost_function.reset(
-      new NumericDiffCostFunction<ExponentialFunctor,
-                                  RIDDERS,
-                                  1,  /* number of residuals */
-                                  1   /* size of x1 */>(
-             new ExponentialFunctor));
+  cost_function.reset(new NumericDiffCostFunction<ExponentialFunctor,
+                                                  RIDDERS,
+                                                  1,  // number of residuals
+                                                  1   // size of x1
+                                                  >(new ExponentialFunctor));
   ExponentialFunctor functor;
   functor.ExpectCostFunctionEvaluationIsNearlyCorrect(*cost_function);
 }
@@ -312,9 +302,9 @@
   cost_function.reset(
       new NumericDiffCostFunction<ExponentialCostFunction,
                                   RIDDERS,
-                                  1,  /* number of residuals */
-                                  1   /* size of x1 */>(
-             new ExponentialCostFunction));
+                                  1,  // number of residuals
+                                  1   // size of x1
+                                  >(new ExponentialCostFunction));
   ExponentialFunctor functor;
   functor.ExpectCostFunctionEvaluationIsNearlyCorrect(*cost_function);
 }
@@ -326,14 +316,16 @@
   // presence of random noise.
   options.ridders_relative_initial_step_size = 10.0;
 
-  cost_function.reset(
-      new NumericDiffCostFunction<RandomizedFunctor,
-                                  RIDDERS,
-                                  1,  /* number of residuals */
-                                  1   /* size of x1 */>(
-             new RandomizedFunctor(kNoiseFactor, kRandomSeed), TAKE_OWNERSHIP,
-             1, options));
-  RandomizedFunctor functor (kNoiseFactor, kRandomSeed);
+  cost_function.reset(new NumericDiffCostFunction<RandomizedFunctor,
+                                                  RIDDERS,
+                                                  1,  // number of residuals
+                                                  1   // size of x1
+                                                  >(
+      new RandomizedFunctor(kNoiseFactor, kRandomSeed),
+      TAKE_OWNERSHIP,
+      1,
+      options));
+  RandomizedFunctor functor(kNoiseFactor, kRandomSeed);
   functor.ExpectCostFunctionEvaluationIsNearlyCorrect(*cost_function);
 }
 
@@ -344,14 +336,16 @@
   // presence of random noise.
   options.ridders_relative_initial_step_size = 10.0;
 
-  cost_function.reset(
-      new NumericDiffCostFunction<RandomizedCostFunction,
-                                  RIDDERS,
-                                  1,  /* number of residuals */
-                                  1   /* size of x1 */>(
-             new RandomizedCostFunction(kNoiseFactor, kRandomSeed),
-             TAKE_OWNERSHIP, 1, options));
-  RandomizedFunctor functor (kNoiseFactor, kRandomSeed);
+  cost_function.reset(new NumericDiffCostFunction<RandomizedCostFunction,
+                                                  RIDDERS,
+                                                  1,  // number of residuals
+                                                  1   // size of x1
+                                                  >(
+      new RandomizedCostFunction(kNoiseFactor, kRandomSeed),
+      TAKE_OWNERSHIP,
+      1,
+      options));
+  RandomizedFunctor functor(kNoiseFactor, kRandomSeed);
   functor.ExpectCostFunctionEvaluationIsNearlyCorrect(*cost_function);
 }
 
diff --git a/internal/ceres/numeric_diff_test_utils.cc b/internal/ceres/numeric_diff_test_utils.cc
index ab1b5f8..d833bbb 100644
--- a/internal/ceres/numeric_diff_test_utils.cc
+++ b/internal/ceres/numeric_diff_test_utils.cc
@@ -33,12 +33,12 @@
 
 #include <algorithm>
 #include <cmath>
+
 #include "ceres/cost_function.h"
 #include "ceres/test_util.h"
 #include "ceres/types.h"
 #include "gtest/gtest.h"
 
-
 namespace ceres {
 namespace internal {
 
@@ -55,23 +55,22 @@
 }
 
 void EasyFunctor::ExpectCostFunctionEvaluationIsNearlyCorrect(
-    const CostFunction& cost_function,
-    NumericDiffMethodType method) const {
-  // The x1[0] is made deliberately small to test the performance near
-  // zero.
+    const CostFunction& cost_function, NumericDiffMethodType method) const {
+  // The x1[0] is made deliberately small to test the performance near zero.
+  // clang-format off
   double x1[] = { 1e-64, 2.0, 3.0, 4.0, 5.0 };
   double x2[] = { 9.0, 9.0, 5.0, 5.0, 1.0 };
   double *parameters[] = { &x1[0], &x2[0] };
+  // clang-format on
 
   double dydx1[15];  // 3 x 5, row major.
   double dydx2[15];  // 3 x 5, row major.
-  double *jacobians[2] = { &dydx1[0], &dydx2[0] };
+  double* jacobians[2] = {&dydx1[0], &dydx2[0]};
 
-  double residuals[3] = {-1e-100, -2e-100, -3e-100 };
+  double residuals[3] = {-1e-100, -2e-100, -3e-100};
 
-  ASSERT_TRUE(cost_function.Evaluate(&parameters[0],
-                                     &residuals[0],
-                                     &jacobians[0]));
+  ASSERT_TRUE(
+      cost_function.Evaluate(&parameters[0], &residuals[0], &jacobians[0]));
 
   double expected_residuals[3];
   EasyFunctor functor;
@@ -97,12 +96,14 @@
   }
 
   for (int i = 0; i < 5; ++i) {
+    // clang-format off
     ExpectClose(x2[i],                    dydx1[5 * 0 + i], tolerance);  // y1
     ExpectClose(x1[i],                    dydx2[5 * 0 + i], tolerance);
     ExpectClose(2 * x2[i] * residuals[0], dydx1[5 * 1 + i], tolerance);  // y2
     ExpectClose(2 * x1[i] * residuals[0], dydx2[5 * 1 + i], tolerance);
     ExpectClose(0.0,                      dydx1[5 * 2 + i], tolerance);  // y3
     ExpectClose(2 * x2[i],                dydx2[5 * 2 + i], tolerance);
+    // clang-format on
   }
 }
 
@@ -119,14 +120,13 @@
 }
 
 void TranscendentalFunctor::ExpectCostFunctionEvaluationIsNearlyCorrect(
-    const CostFunction& cost_function,
-    NumericDiffMethodType method) const {
-
+    const CostFunction& cost_function, NumericDiffMethodType method) const {
   struct TestParameterBlocks {
     double x1[5];
     double x2[5];
   };
 
+  // clang-format off
   std::vector<TestParameterBlocks> kTests =  {
     { { 1.0, 2.0, 3.0, 4.0, 5.0 },  // No zeros.
       { 9.0, 9.0, 5.0, 5.0, 1.0 },
@@ -147,21 +147,21 @@
       { 0.0, 0.0, 0.0, 0.0, 0.0 },
     },
   };
+  // clang-format on
 
   for (int k = 0; k < kTests.size(); ++k) {
-    double *x1 = &(kTests[k].x1[0]);
-    double *x2 = &(kTests[k].x2[0]);
-    double *parameters[] = { x1, x2 };
+    double* x1 = &(kTests[k].x1[0]);
+    double* x2 = &(kTests[k].x2[0]);
+    double* parameters[] = {x1, x2};
 
     double dydx1[10];
     double dydx2[10];
-    double *jacobians[2] = { &dydx1[0], &dydx2[0] };
+    double* jacobians[2] = {&dydx1[0], &dydx2[0]};
 
     double residuals[2];
 
-    ASSERT_TRUE(cost_function.Evaluate(&parameters[0],
-                                       &residuals[0],
-                                       &jacobians[0]));
+    ASSERT_TRUE(
+        cost_function.Evaluate(&parameters[0], &residuals[0], &jacobians[0]));
     double x1x2 = 0;
     for (int i = 0; i < 5; ++i) {
       x1x2 += x1[i] * x2[i];
@@ -184,39 +184,37 @@
     }
 
     for (int i = 0; i < 5; ++i) {
+      // clang-format off
       ExpectClose( x2[i] * cos(x1x2),              dydx1[5 * 0 + i], tolerance);
       ExpectClose( x1[i] * cos(x1x2),              dydx2[5 * 0 + i], tolerance);
       ExpectClose(-x2[i] * exp(-x1x2 / 10.) / 10., dydx1[5 * 1 + i], tolerance);
       ExpectClose(-x1[i] * exp(-x1x2 / 10.) / 10., dydx2[5 * 1 + i], tolerance);
+      // clang-format on
     }
   }
 }
 
-bool ExponentialFunctor::operator()(const double* x1,
-                                    double* residuals) const {
+bool ExponentialFunctor::operator()(const double* x1, double* residuals) const {
   residuals[0] = exp(x1[0]);
   return true;
 }
 
-
 void ExponentialFunctor::ExpectCostFunctionEvaluationIsNearlyCorrect(
     const CostFunction& cost_function) const {
   // Evaluating the functor at specific points for testing.
-  std::vector<double> kTests = { 1.0, 2.0, 3.0, 4.0, 5.0 };
+  std::vector<double> kTests = {1.0, 2.0, 3.0, 4.0, 5.0};
 
   // Minimal tolerance w.r.t. the cost function and the tests.
   const double kTolerance = 2e-14;
 
   for (int k = 0; k < kTests.size(); ++k) {
-    double *parameters[] = { &kTests[k] };
+    double* parameters[] = {&kTests[k]};
     double dydx;
-    double *jacobians[1] = { &dydx };
+    double* jacobians[1] = {&dydx};
     double residual;
 
-    ASSERT_TRUE(cost_function.Evaluate(&parameters[0],
-                                       &residual,
-                                       &jacobians[0]));
-
+    ASSERT_TRUE(
+        cost_function.Evaluate(&parameters[0], &residual, &jacobians[0]));
 
     double expected_result = exp(kTests[k]);
 
@@ -228,10 +226,9 @@
   }
 }
 
-bool RandomizedFunctor::operator()(const double* x1,
-                                   double* residuals) const {
-  double random_value = static_cast<double>(rand()) /
-      static_cast<double>(RAND_MAX);
+bool RandomizedFunctor::operator()(const double* x1, double* residuals) const {
+  double random_value =
+      static_cast<double>(rand()) / static_cast<double>(RAND_MAX);
 
   // Normalize noise to [-factor, factor].
   random_value *= 2.0;
@@ -244,7 +241,7 @@
 
 void RandomizedFunctor::ExpectCostFunctionEvaluationIsNearlyCorrect(
     const CostFunction& cost_function) const {
-  std::vector<double> kTests = { 0.0, 1.0, 3.0, 4.0, 50.0 };
+  std::vector<double> kTests = {0.0, 1.0, 3.0, 4.0, 50.0};
 
   const double kTolerance = 2e-4;
 
@@ -252,14 +249,13 @@
   srand(random_seed_);
 
   for (int k = 0; k < kTests.size(); ++k) {
-    double *parameters[] = { &kTests[k] };
+    double* parameters[] = {&kTests[k]};
     double dydx;
-    double *jacobians[1] = { &dydx };
+    double* jacobians[1] = {&dydx};
     double residual;
 
-    ASSERT_TRUE(cost_function.Evaluate(&parameters[0],
-                                       &residual,
-                                       &jacobians[0]));
+    ASSERT_TRUE(
+        cost_function.Evaluate(&parameters[0], &residual, &jacobians[0]));
 
     // Expect residual to be close to x^2 w.r.t. noise factor.
     ExpectClose(residual, kTests[k] * kTests[k], noise_factor_);
diff --git a/internal/ceres/numeric_diff_test_utils.h b/internal/ceres/numeric_diff_test_utils.h
index 33497d9..462553c 100644
--- a/internal/ceres/numeric_diff_test_utils.h
+++ b/internal/ceres/numeric_diff_test_utils.h
@@ -51,8 +51,7 @@
  public:
   bool operator()(const double* x1, const double* x2, double* residuals) const;
   void ExpectCostFunctionEvaluationIsNearlyCorrect(
-      const CostFunction& cost_function,
-      NumericDiffMethodType method) const;
+      const CostFunction& cost_function, NumericDiffMethodType method) const;
 };
 
 class EasyCostFunction : public SizedCostFunction<3, 5, 5> {
@@ -76,8 +75,7 @@
  public:
   bool operator()(const double* x1, const double* x2, double* residuals) const;
   void ExpectCostFunctionEvaluationIsNearlyCorrect(
-      const CostFunction& cost_function,
-      NumericDiffMethodType method) const;
+      const CostFunction& cost_function, NumericDiffMethodType method) const;
 };
 
 class TranscendentalCostFunction : public SizedCostFunction<2, 5, 5> {
@@ -87,6 +85,7 @@
                 double** /* not used */) const final {
     return functor_(parameters[0], parameters[1], residuals);
   }
+
  private:
   TranscendentalFunctor functor_;
 };
@@ -117,8 +116,7 @@
 class RandomizedFunctor {
  public:
   RandomizedFunctor(double noise_factor, unsigned int random_seed)
-      : noise_factor_(noise_factor), random_seed_(random_seed) {
-  }
+      : noise_factor_(noise_factor), random_seed_(random_seed) {}
 
   bool operator()(const double* x1, double* residuals) const;
   void ExpectCostFunctionEvaluationIsNearlyCorrect(
@@ -132,8 +130,7 @@
 class RandomizedCostFunction : public SizedCostFunction<1, 1> {
  public:
   RandomizedCostFunction(double noise_factor, unsigned int random_seed)
-      : functor_(noise_factor, random_seed) {
-  }
+      : functor_(noise_factor, random_seed) {}
 
   bool Evaluate(double const* const* parameters,
                 double* residuals,
@@ -145,7 +142,6 @@
   RandomizedFunctor functor_;
 };
 
-
 }  // namespace internal
 }  // namespace ceres
 
diff --git a/internal/ceres/ordered_groups_test.cc b/internal/ceres/ordered_groups_test.cc
index 8cf4324..d613a41 100644
--- a/internal/ceres/ordered_groups_test.cc
+++ b/internal/ceres/ordered_groups_test.cc
@@ -32,6 +32,7 @@
 
 #include <cstddef>
 #include <vector>
+
 #include "gtest/gtest.h"
 
 namespace ceres {
diff --git a/internal/ceres/pair_hash.h b/internal/ceres/pair_hash.h
index 80453ba..abbedcc 100644
--- a/internal/ceres/pair_hash.h
+++ b/internal/ceres/pair_hash.h
@@ -33,10 +33,11 @@
 #ifndef CERES_INTERNAL_PAIR_HASH_H_
 #define CERES_INTERNAL_PAIR_HASH_H_
 
-#include "ceres/internal/port.h"
 #include <cstdint>
 #include <utility>
 
+#include "ceres/internal/port.h"
+
 namespace ceres {
 namespace internal {
 
@@ -53,6 +54,8 @@
 // in 18 cycles if you're lucky. On x86 architectures, this requires 45
 // instructions in 27 cycles, if you're lucky.
 //
+// clang-format off
+//
 // 32bit version
 inline void hash_mix(uint32_t& a, uint32_t& b, uint32_t& c) {
   a -= b; a -= c; a ^= (c>>13);
@@ -78,6 +81,7 @@
   b -= c; b -= a; b ^= (a<<49);
   c -= a; c -= b; c ^= (b>>11);
 }
+// clang-format on
 
 inline uint32_t Hash32NumWithSeed(uint32_t num, uint32_t c) {
   // The golden ratio; an arbitrary value.
diff --git a/internal/ceres/parallel_for_cxx.cc b/internal/ceres/parallel_for_cxx.cc
index 8e358f5..4da40c0 100644
--- a/internal/ceres/parallel_for_cxx.cc
+++ b/internal/ceres/parallel_for_cxx.cc
@@ -33,14 +33,13 @@
 
 #ifdef CERES_USE_CXX_THREADS
 
-#include "ceres/parallel_for.h"
-
 #include <cmath>
 #include <condition_variable>
 #include <memory>
 #include <mutex>
 
 #include "ceres/concurrent_queue.h"
+#include "ceres/parallel_for.h"
 #include "ceres/scoped_thread_token.h"
 #include "ceres/thread_token_provider.h"
 #include "glog/logging.h"
@@ -117,9 +116,7 @@
 
 }  // namespace
 
-int MaxNumThreadsAvailable() {
-  return ThreadPool::MaxNumThreadsAvailable();
-}
+int MaxNumThreadsAvailable() { return ThreadPool::MaxNumThreadsAvailable(); }
 
 // See ParallelFor (below) for more details.
 void ParallelFor(ContextImpl* context,
@@ -141,8 +138,10 @@
     return;
   }
 
-  ParallelFor(context, start, end, num_threads,
-              [&function](int /*thread_id*/, int i) { function(i); });
+  ParallelFor(
+      context, start, end, num_threads, [&function](int /*thread_id*/, int i) {
+        function(i);
+      });
 }
 
 // This implementation uses a fixed size max worker pool with a shared task
@@ -213,8 +212,7 @@
     const int thread_id = scoped_thread_token.token();
 
     // Perform each task.
-    for (int j = shared_state->start + i;
-         j < shared_state->end;
+    for (int j = shared_state->start + i; j < shared_state->end;
          j += shared_state->num_work_items) {
       function(thread_id, j);
     }
@@ -244,4 +242,4 @@
 }  // namespace internal
 }  // namespace ceres
 
-#endif // CERES_USE_CXX_THREADS
+#endif  // CERES_USE_CXX_THREADS
diff --git a/internal/ceres/parallel_for_nothreads.cc b/internal/ceres/parallel_for_nothreads.cc
index e8f450a..d036569 100644
--- a/internal/ceres/parallel_for_nothreads.cc
+++ b/internal/ceres/parallel_for_nothreads.cc
@@ -72,7 +72,7 @@
   }
 }
 
-}
-}
+}  // namespace internal
+}  // namespace ceres
 
 #endif  // CERES_NO_THREADS
diff --git a/internal/ceres/parallel_for_openmp.cc b/internal/ceres/parallel_for_openmp.cc
index 8afe3b1..eb9d905 100644
--- a/internal/ceres/parallel_for_openmp.cc
+++ b/internal/ceres/parallel_for_openmp.cc
@@ -34,7 +34,6 @@
 #if defined(CERES_USE_OPENMP)
 
 #include "ceres/parallel_for.h"
-
 #include "ceres/scoped_thread_token.h"
 #include "ceres/thread_token_provider.h"
 #include "glog/logging.h"
@@ -43,9 +42,7 @@
 namespace ceres {
 namespace internal {
 
-int MaxNumThreadsAvailable() {
-  return omp_get_max_threads();
-}
+int MaxNumThreadsAvailable() { return omp_get_max_threads(); }
 
 void ParallelFor(ContextImpl* context,
                  int start,
diff --git a/internal/ceres/parallel_for_test.cc b/internal/ceres/parallel_for_test.cc
index 04e5783..434f993 100644
--- a/internal/ceres/parallel_for_test.cc
+++ b/internal/ceres/parallel_for_test.cc
@@ -29,7 +29,9 @@
 // Author: vitus@google.com (Michael Vitus)
 
 // This include must come before any #ifndef check on Ceres compile options.
+// clang-format off
 #include "ceres/internal/port.h"
+// clang-format on
 
 #include "ceres/parallel_for.h"
 
@@ -64,8 +66,9 @@
 
   for (int num_threads = 1; num_threads <= 8; ++num_threads) {
     std::vector<int> values(size, 0);
-    ParallelFor(&context, 0, size, num_threads,
-                [&values](int i) { values[i] = std::sqrt(i); });
+    ParallelFor(&context, 0, size, num_threads, [&values](int i) {
+      values[i] = std::sqrt(i);
+    });
     EXPECT_THAT(values, ElementsAreArray(expected_results));
   }
 }
@@ -84,8 +87,10 @@
 
   for (int num_threads = 1; num_threads <= 8; ++num_threads) {
     std::vector<int> values(size, 0);
-    ParallelFor(&context, 0, size, num_threads,
-                [&values](int thread_id, int i) { values[i] = std::sqrt(i); });
+    ParallelFor(
+        &context, 0, size, num_threads, [&values](int thread_id, int i) {
+          values[i] = std::sqrt(i);
+        });
     EXPECT_THAT(values, ElementsAreArray(expected_results));
   }
 }
@@ -146,7 +151,10 @@
   std::mutex mutex;
   std::condition_variable condition;
   int count = 0;
-  ParallelFor(&context, 0, 2, 2,
+  ParallelFor(&context,
+              0,
+              2,
+              2,
               [&x, &mutex, &condition, &count](int thread_id, int i) {
                 std::unique_lock<std::mutex> lock(mutex);
                 x[i] = thread_id;
@@ -155,7 +163,7 @@
                 condition.wait(lock, [&]() { return count == 2; });
               });
 
-  EXPECT_THAT(x, UnorderedElementsAreArray({0,1}));
+  EXPECT_THAT(x, UnorderedElementsAreArray({0, 1}));
 }
 #endif  // CERES_NO_THREADS
 
diff --git a/internal/ceres/parallel_utils_test.cc b/internal/ceres/parallel_utils_test.cc
index f997d25..53870bb 100644
--- a/internal/ceres/parallel_utils_test.cc
+++ b/internal/ceres/parallel_utils_test.cc
@@ -29,7 +29,10 @@
 // Author: wjr@google.com (William Rucklidge)
 
 // This include must come before any #ifndef check on Ceres compile options.
+// clang-format off
 #include "ceres/internal/port.h"
+// clang-format on
+
 #include "ceres/parallel_utils.h"
 
 #include "glog/logging.h"
diff --git a/internal/ceres/parameter_block_ordering.cc b/internal/ceres/parameter_block_ordering.cc
index ef521c0..9899c24 100644
--- a/internal/ceres/parameter_block_ordering.cc
+++ b/internal/ceres/parameter_block_ordering.cc
@@ -50,11 +50,11 @@
 using std::vector;
 
 int ComputeStableSchurOrdering(const Program& program,
-                         vector<ParameterBlock*>* ordering) {
+                               vector<ParameterBlock*>* ordering) {
   CHECK(ordering != nullptr);
   ordering->clear();
   EventLogger event_logger("ComputeStableSchurOrdering");
-  std::unique_ptr<Graph< ParameterBlock*> > graph(CreateHessianGraph(program));
+  std::unique_ptr<Graph<ParameterBlock*>> graph(CreateHessianGraph(program));
   event_logger.AddEvent("CreateHessianGraph");
 
   const vector<ParameterBlock*>& parameter_blocks = program.parameter_blocks();
@@ -86,7 +86,7 @@
   CHECK(ordering != nullptr);
   ordering->clear();
 
-  std::unique_ptr<Graph< ParameterBlock*> > graph(CreateHessianGraph(program));
+  std::unique_ptr<Graph<ParameterBlock*>> graph(CreateHessianGraph(program));
   int independent_set_size = IndependentSetOrdering(*graph, ordering);
   const vector<ParameterBlock*>& parameter_blocks = program.parameter_blocks();
 
@@ -106,7 +106,7 @@
   CHECK(ordering != nullptr);
   ordering->Clear();
   const vector<ParameterBlock*> parameter_blocks = program.parameter_blocks();
-  std::unique_ptr<Graph< ParameterBlock*> > graph(CreateHessianGraph(program));
+  std::unique_ptr<Graph<ParameterBlock*>> graph(CreateHessianGraph(program));
 
   int num_covered = 0;
   int round = 0;
diff --git a/internal/ceres/parameter_block_ordering.h b/internal/ceres/parameter_block_ordering.h
index f996929..7b58afd 100644
--- a/internal/ceres/parameter_block_ordering.h
+++ b/internal/ceres/parameter_block_ordering.h
@@ -32,8 +32,9 @@
 #define CERES_INTERNAL_PARAMETER_BLOCK_ORDERING_H_
 
 #include <vector>
-#include "ceres/ordered_groups.h"
+
 #include "ceres/graph.h"
+#include "ceres/ordered_groups.h"
 #include "ceres/types.h"
 
 namespace ceres {
@@ -56,13 +57,13 @@
 //             complement of the independent set,
 //             fixed blocks]
 int ComputeSchurOrdering(const Program& program,
-                         std::vector<ParameterBlock* >* ordering);
+                         std::vector<ParameterBlock*>* ordering);
 
 // Same as above, except that ties while computing the independent set
 // ordering are resolved in favour of the order in which the parameter
 // blocks occur in the program.
 int ComputeStableSchurOrdering(const Program& program,
-                               std::vector<ParameterBlock* >* ordering);
+                               std::vector<ParameterBlock*>* ordering);
 
 // Use an approximate independent set ordering to decompose the
 // parameter blocks of a problem in a sequence of independent
diff --git a/internal/ceres/parameter_block_ordering_test.cc b/internal/ceres/parameter_block_ordering_test.cc
index 24dfdc9..1078893 100644
--- a/internal/ceres/parameter_block_ordering_test.cc
+++ b/internal/ceres/parameter_block_ordering_test.cc
@@ -61,7 +61,7 @@
 };
 
 class SchurOrderingTest : public ::testing::Test {
- protected :
+ protected:
   void SetUp() final {
     // The explicit calls to AddParameterBlock are necessary because
     // the below tests depend on the specific numbering of the
@@ -75,8 +75,8 @@
     problem_.AddResidualBlock(new DummyCostFunction<6, 5, 4>, NULL, z_, y_);
     problem_.AddResidualBlock(new DummyCostFunction<3, 3, 5>, NULL, x_, z_);
     problem_.AddResidualBlock(new DummyCostFunction<7, 5, 3>, NULL, z_, x_);
-    problem_.AddResidualBlock(new DummyCostFunction<1, 5, 3, 6>, NULL,
-                              z_, x_, w_);
+    problem_.AddResidualBlock(
+        new DummyCostFunction<1, 5, 3, 6>, NULL, z_, x_, w_);
   }
 
   ProblemImpl problem_;
diff --git a/internal/ceres/parameter_dims_test.cc b/internal/ceres/parameter_dims_test.cc
index c832260..ee3be8f 100644
--- a/internal/ceres/parameter_dims_test.cc
+++ b/internal/ceres/parameter_dims_test.cc
@@ -25,6 +25,7 @@
 #include "ceres/internal/parameter_dims.h"
 
 #include <gtest/gtest.h>
+
 #include <type_traits>
 #include <utility>
 
@@ -35,15 +36,15 @@
 static_assert(IsValidParameterDimensionSequence(std::integer_sequence<int>()) ==
                   true,
               "Unit test of is valid parameter dimension sequence failed.");
-static_assert(
-    IsValidParameterDimensionSequence(std::integer_sequence<int, 2, 1>()) == true,
-    "Unit test of is valid parameter dimension sequence failed.");
-static_assert(
-    IsValidParameterDimensionSequence(std::integer_sequence<int, 0, 1>()) == false,
-    "Unit test of is valid parameter dimension sequence failed.");
-static_assert(
-    IsValidParameterDimensionSequence(std::integer_sequence<int, 3, 0>()) == false,
-    "Unit test of is valid parameter dimension sequence failed.");
+static_assert(IsValidParameterDimensionSequence(
+                  std::integer_sequence<int, 2, 1>()) == true,
+              "Unit test of is valid parameter dimension sequence failed.");
+static_assert(IsValidParameterDimensionSequence(
+                  std::integer_sequence<int, 0, 1>()) == false,
+              "Unit test of is valid parameter dimension sequence failed.");
+static_assert(IsValidParameterDimensionSequence(
+                  std::integer_sequence<int, 3, 0>()) == false,
+              "Unit test of is valid parameter dimension sequence failed.");
 
 // Static parameter dims unit test
 static_assert(
diff --git a/internal/ceres/partitioned_matrix_view.h b/internal/ceres/partitioned_matrix_view.h
index 3853ea1..b8ac3b2 100644
--- a/internal/ceres/partitioned_matrix_view.h
+++ b/internal/ceres/partitioned_matrix_view.h
@@ -98,12 +98,14 @@
   virtual void UpdateBlockDiagonalFtF(
       BlockSparseMatrix* block_diagonal) const = 0;
 
+  // clang-format off
   virtual int num_col_blocks_e() const = 0;
   virtual int num_col_blocks_f() const = 0;
   virtual int num_cols_e()       const = 0;
   virtual int num_cols_f()       const = 0;
   virtual int num_rows()         const = 0;
   virtual int num_cols()         const = 0;
+  // clang-format on
 
   static PartitionedMatrixViewBase* Create(const LinearSolver::Options& options,
                                            const BlockSparseMatrix& matrix);
@@ -111,7 +113,7 @@
 
 template <int kRowBlockSize = Eigen::Dynamic,
           int kEBlockSize = Eigen::Dynamic,
-          int kFBlockSize = Eigen::Dynamic >
+          int kFBlockSize = Eigen::Dynamic>
 class PartitionedMatrixView : public PartitionedMatrixViewBase {
  public:
   // matrix = [E F], where the matrix E contains the first
@@ -127,12 +129,14 @@
   BlockSparseMatrix* CreateBlockDiagonalFtF() const final;
   void UpdateBlockDiagonalEtE(BlockSparseMatrix* block_diagonal) const final;
   void UpdateBlockDiagonalFtF(BlockSparseMatrix* block_diagonal) const final;
+  // clang-format off
   int num_col_blocks_e() const final { return num_col_blocks_e_;  }
   int num_col_blocks_f() const final { return num_col_blocks_f_;  }
   int num_cols_e()       const final { return num_cols_e_;        }
   int num_cols_f()       const final { return num_cols_f_;        }
   int num_rows()         const final { return matrix_.num_rows(); }
   int num_cols()         const final { return matrix_.num_cols(); }
+  // clang-format on
 
  private:
   BlockSparseMatrix* CreateBlockDiagonalMatrixLayout(int start_col_block,
diff --git a/internal/ceres/partitioned_matrix_view_impl.h b/internal/ceres/partitioned_matrix_view_impl.h
index f3f548c..0b6a57f 100644
--- a/internal/ceres/partitioned_matrix_view_impl.h
+++ b/internal/ceres/partitioned_matrix_view_impl.h
@@ -28,14 +28,14 @@
 //
 // Author: sameeragarwal@google.com (Sameer Agarwal)
 
-#include "ceres/partitioned_matrix_view.h"
-
 #include <algorithm>
 #include <cstring>
 #include <vector>
+
 #include "ceres/block_sparse_matrix.h"
 #include "ceres/block_structure.h"
 #include "ceres/internal/eigen.h"
+#include "ceres/partitioned_matrix_view.h"
 #include "ceres/small_blas.h"
 #include "glog/logging.h"
 
@@ -44,11 +44,8 @@
 
 template <int kRowBlockSize, int kEBlockSize, int kFBlockSize>
 PartitionedMatrixView<kRowBlockSize, kEBlockSize, kFBlockSize>::
-PartitionedMatrixView(
-    const BlockSparseMatrix& matrix,
-    int num_col_blocks_e)
-    : matrix_(matrix),
-      num_col_blocks_e_(num_col_blocks_e) {
+    PartitionedMatrixView(const BlockSparseMatrix& matrix, int num_col_blocks_e)
+    : matrix_(matrix), num_col_blocks_e_(num_col_blocks_e) {
   const CompressedRowBlockStructure* bs = matrix_.block_structure();
   CHECK(bs != nullptr);
 
@@ -85,8 +82,7 @@
 
 template <int kRowBlockSize, int kEBlockSize, int kFBlockSize>
 PartitionedMatrixView<kRowBlockSize, kEBlockSize, kFBlockSize>::
-~PartitionedMatrixView() {
-}
+    ~PartitionedMatrixView() {}
 
 // The next four methods don't seem to be particularly cache
 // friendly. This is an artifact of how the BlockStructure of the
@@ -94,9 +90,8 @@
 // multithreading as well as improved data layout.
 
 template <int kRowBlockSize, int kEBlockSize, int kFBlockSize>
-void
-PartitionedMatrixView<kRowBlockSize, kEBlockSize, kFBlockSize>::
-RightMultiplyE(const double* x, double* y) const {
+void PartitionedMatrixView<kRowBlockSize, kEBlockSize, kFBlockSize>::
+    RightMultiplyE(const double* x, double* y) const {
   const CompressedRowBlockStructure* bs = matrix_.block_structure();
 
   // Iterate over the first num_row_blocks_e_ row blocks, and multiply
@@ -109,17 +104,18 @@
     const int col_block_id = cell.block_id;
     const int col_block_pos = bs->cols[col_block_id].position;
     const int col_block_size = bs->cols[col_block_id].size;
+    // clang-format off
     MatrixVectorMultiply<kRowBlockSize, kEBlockSize, 1>(
         values + cell.position, row_block_size, col_block_size,
         x + col_block_pos,
         y + row_block_pos);
+    // clang-format on
   }
 }
 
 template <int kRowBlockSize, int kEBlockSize, int kFBlockSize>
-void
-PartitionedMatrixView<kRowBlockSize, kEBlockSize, kFBlockSize>::
-RightMultiplyF(const double* x, double* y) const {
+void PartitionedMatrixView<kRowBlockSize, kEBlockSize, kFBlockSize>::
+    RightMultiplyF(const double* x, double* y) const {
   const CompressedRowBlockStructure* bs = matrix_.block_structure();
 
   // Iterate over row blocks, and if the row block is in E, then
@@ -136,10 +132,12 @@
       const int col_block_id = cells[c].block_id;
       const int col_block_pos = bs->cols[col_block_id].position;
       const int col_block_size = bs->cols[col_block_id].size;
+      // clang-format off
       MatrixVectorMultiply<kRowBlockSize, kFBlockSize, 1>(
           values + cells[c].position, row_block_size, col_block_size,
           x + col_block_pos - num_cols_e_,
           y + row_block_pos);
+      // clang-format on
     }
   }
 
@@ -151,18 +149,19 @@
       const int col_block_id = cells[c].block_id;
       const int col_block_pos = bs->cols[col_block_id].position;
       const int col_block_size = bs->cols[col_block_id].size;
+      // clang-format off
       MatrixVectorMultiply<Eigen::Dynamic, Eigen::Dynamic, 1>(
           values + cells[c].position, row_block_size, col_block_size,
           x + col_block_pos - num_cols_e_,
           y + row_block_pos);
+      // clang-format on
     }
   }
 }
 
 template <int kRowBlockSize, int kEBlockSize, int kFBlockSize>
-void
-PartitionedMatrixView<kRowBlockSize, kEBlockSize, kFBlockSize>::
-LeftMultiplyE(const double* x, double* y) const {
+void PartitionedMatrixView<kRowBlockSize, kEBlockSize, kFBlockSize>::
+    LeftMultiplyE(const double* x, double* y) const {
   const CompressedRowBlockStructure* bs = matrix_.block_structure();
 
   // Iterate over the first num_row_blocks_e_ row blocks, and multiply
@@ -175,17 +174,18 @@
     const int col_block_id = cell.block_id;
     const int col_block_pos = bs->cols[col_block_id].position;
     const int col_block_size = bs->cols[col_block_id].size;
+    // clang-format off
     MatrixTransposeVectorMultiply<kRowBlockSize, kEBlockSize, 1>(
         values + cell.position, row_block_size, col_block_size,
         x + row_block_pos,
         y + col_block_pos);
+    // clang-format on
   }
 }
 
 template <int kRowBlockSize, int kEBlockSize, int kFBlockSize>
-void
-PartitionedMatrixView<kRowBlockSize, kEBlockSize, kFBlockSize>::
-LeftMultiplyF(const double* x, double* y) const {
+void PartitionedMatrixView<kRowBlockSize, kEBlockSize, kFBlockSize>::
+    LeftMultiplyF(const double* x, double* y) const {
   const CompressedRowBlockStructure* bs = matrix_.block_structure();
 
   // Iterate over row blocks, and if the row block is in E, then
@@ -202,10 +202,12 @@
       const int col_block_id = cells[c].block_id;
       const int col_block_pos = bs->cols[col_block_id].position;
       const int col_block_size = bs->cols[col_block_id].size;
+      // clang-format off
       MatrixTransposeVectorMultiply<kRowBlockSize, kFBlockSize, 1>(
         values + cells[c].position, row_block_size, col_block_size,
         x + row_block_pos,
         y + col_block_pos - num_cols_e_);
+      // clang-format on
     }
   }
 
@@ -217,10 +219,12 @@
       const int col_block_id = cells[c].block_id;
       const int col_block_pos = bs->cols[col_block_id].position;
       const int col_block_size = bs->cols[col_block_id].size;
+      // clang-format off
       MatrixTransposeVectorMultiply<Eigen::Dynamic, Eigen::Dynamic, 1>(
         values + cells[c].position, row_block_size, col_block_size,
         x + row_block_pos,
         y + col_block_pos - num_cols_e_);
+      // clang-format on
     }
   }
 }
@@ -233,7 +237,8 @@
 template <int kRowBlockSize, int kEBlockSize, int kFBlockSize>
 BlockSparseMatrix*
 PartitionedMatrixView<kRowBlockSize, kEBlockSize, kFBlockSize>::
-CreateBlockDiagonalMatrixLayout(int start_col_block, int end_col_block) const {
+    CreateBlockDiagonalMatrixLayout(int start_col_block,
+                                    int end_col_block) const {
   const CompressedRowBlockStructure* bs = matrix_.block_structure();
   CompressedRowBlockStructure* block_diagonal_structure =
       new CompressedRowBlockStructure;
@@ -269,9 +274,10 @@
 }
 
 template <int kRowBlockSize, int kEBlockSize, int kFBlockSize>
-BlockSparseMatrix*
-PartitionedMatrixView<kRowBlockSize, kEBlockSize, kFBlockSize>::
-CreateBlockDiagonalEtE() const {
+BlockSparseMatrix* PartitionedMatrixView<kRowBlockSize,
+                                         kEBlockSize,
+                                         kFBlockSize>::CreateBlockDiagonalEtE()
+    const {
   BlockSparseMatrix* block_diagonal =
       CreateBlockDiagonalMatrixLayout(0, num_col_blocks_e_);
   UpdateBlockDiagonalEtE(block_diagonal);
@@ -279,12 +285,12 @@
 }
 
 template <int kRowBlockSize, int kEBlockSize, int kFBlockSize>
-BlockSparseMatrix*
-PartitionedMatrixView<kRowBlockSize, kEBlockSize, kFBlockSize>::
-CreateBlockDiagonalFtF() const {
-  BlockSparseMatrix* block_diagonal =
-      CreateBlockDiagonalMatrixLayout(
-          num_col_blocks_e_, num_col_blocks_e_ + num_col_blocks_f_);
+BlockSparseMatrix* PartitionedMatrixView<kRowBlockSize,
+                                         kEBlockSize,
+                                         kFBlockSize>::CreateBlockDiagonalFtF()
+    const {
+  BlockSparseMatrix* block_diagonal = CreateBlockDiagonalMatrixLayout(
+      num_col_blocks_e_, num_col_blocks_e_ + num_col_blocks_f_);
   UpdateBlockDiagonalFtF(block_diagonal);
   return block_diagonal;
 }
@@ -295,17 +301,15 @@
 //    block_diagonal = block_diagonal(E'E)
 //
 template <int kRowBlockSize, int kEBlockSize, int kFBlockSize>
-void
-PartitionedMatrixView<kRowBlockSize, kEBlockSize, kFBlockSize>::
-UpdateBlockDiagonalEtE(
-    BlockSparseMatrix* block_diagonal) const {
+void PartitionedMatrixView<kRowBlockSize, kEBlockSize, kFBlockSize>::
+    UpdateBlockDiagonalEtE(BlockSparseMatrix* block_diagonal) const {
   const CompressedRowBlockStructure* bs = matrix_.block_structure();
   const CompressedRowBlockStructure* block_diagonal_structure =
       block_diagonal->block_structure();
 
   block_diagonal->SetZero();
   const double* values = matrix_.values();
-  for (int r = 0; r < num_row_blocks_e_ ; ++r) {
+  for (int r = 0; r < num_row_blocks_e_; ++r) {
     const Cell& cell = bs->rows[r].cells[0];
     const int row_block_size = bs->rows[r].block.size;
     const int block_id = cell.block_id;
@@ -313,12 +317,14 @@
     const int cell_position =
         block_diagonal_structure->rows[block_id].cells[0].position;
 
+    // clang-format off
     MatrixTransposeMatrixMultiply
         <kRowBlockSize, kEBlockSize, kRowBlockSize, kEBlockSize, 1>(
             values + cell.position, row_block_size, col_block_size,
             values + cell.position, row_block_size, col_block_size,
             block_diagonal->mutable_values() + cell_position,
             0, 0, col_block_size, col_block_size);
+    // clang-format on
   }
 }
 
@@ -328,9 +334,8 @@
 //   block_diagonal = block_diagonal(F'F)
 //
 template <int kRowBlockSize, int kEBlockSize, int kFBlockSize>
-void
-PartitionedMatrixView<kRowBlockSize, kEBlockSize, kFBlockSize>::
-UpdateBlockDiagonalFtF(BlockSparseMatrix* block_diagonal) const {
+void PartitionedMatrixView<kRowBlockSize, kEBlockSize, kFBlockSize>::
+    UpdateBlockDiagonalFtF(BlockSparseMatrix* block_diagonal) const {
   const CompressedRowBlockStructure* bs = matrix_.block_structure();
   const CompressedRowBlockStructure* block_diagonal_structure =
       block_diagonal->block_structure();
@@ -347,12 +352,14 @@
       const int cell_position =
           block_diagonal_structure->rows[diagonal_block_id].cells[0].position;
 
+      // clang-format off
       MatrixTransposeMatrixMultiply
           <kRowBlockSize, kFBlockSize, kRowBlockSize, kFBlockSize, 1>(
               values + cells[c].position, row_block_size, col_block_size,
               values + cells[c].position, row_block_size, col_block_size,
               block_diagonal->mutable_values() + cell_position,
               0, 0, col_block_size, col_block_size);
+      // clang-format on
     }
   }
 
@@ -366,12 +373,14 @@
       const int cell_position =
           block_diagonal_structure->rows[diagonal_block_id].cells[0].position;
 
+      // clang-format off
       MatrixTransposeMatrixMultiply
           <Eigen::Dynamic, Eigen::Dynamic, Eigen::Dynamic, Eigen::Dynamic, 1>(
               values + cells[c].position, row_block_size, col_block_size,
               values + cells[c].position, row_block_size, col_block_size,
               block_diagonal->mutable_values() + cell_position,
               0, 0, col_block_size, col_block_size);
+      // clang-format on
     }
   }
 }
diff --git a/internal/ceres/partitioned_matrix_view_test.cc b/internal/ceres/partitioned_matrix_view_test.cc
index 827cfb7..b66d0b8 100644
--- a/internal/ceres/partitioned_matrix_view_test.cc
+++ b/internal/ceres/partitioned_matrix_view_test.cc
@@ -32,6 +32,7 @@
 
 #include <memory>
 #include <vector>
+
 #include "ceres/block_structure.h"
 #include "ceres/casts.h"
 #include "ceres/internal/eigen.h"
@@ -47,7 +48,7 @@
 const double kEpsilon = 1e-14;
 
 class PartitionedMatrixViewTest : public ::testing::Test {
- protected :
+ protected:
   void SetUp() final {
     srand(5);
     std::unique_ptr<LinearLeastSquaresProblem> problem(
@@ -61,8 +62,7 @@
     LinearSolver::Options options;
     options.elimination_groups.push_back(num_eliminate_blocks_);
     pmv_.reset(PartitionedMatrixViewBase::Create(
-                   options,
-                   *down_cast<BlockSparseMatrix*>(A_.get())));
+        options, *down_cast<BlockSparseMatrix*>(A_.get())));
   }
 
   int num_rows_;
@@ -143,9 +143,9 @@
 }
 
 TEST_F(PartitionedMatrixViewTest, BlockDiagonalEtE) {
-  std::unique_ptr<BlockSparseMatrix>
-      block_diagonal_ee(pmv_->CreateBlockDiagonalEtE());
-  const CompressedRowBlockStructure* bs  = block_diagonal_ee->block_structure();
+  std::unique_ptr<BlockSparseMatrix> block_diagonal_ee(
+      pmv_->CreateBlockDiagonalEtE());
+  const CompressedRowBlockStructure* bs = block_diagonal_ee->block_structure();
 
   EXPECT_EQ(block_diagonal_ee->num_rows(), 2);
   EXPECT_EQ(block_diagonal_ee->num_cols(), 2);
@@ -157,9 +157,9 @@
 }
 
 TEST_F(PartitionedMatrixViewTest, BlockDiagonalFtF) {
-  std::unique_ptr<BlockSparseMatrix>
-      block_diagonal_ff(pmv_->CreateBlockDiagonalFtF());
-  const CompressedRowBlockStructure* bs  = block_diagonal_ff->block_structure();
+  std::unique_ptr<BlockSparseMatrix> block_diagonal_ff(
+      pmv_->CreateBlockDiagonalFtF());
+  const CompressedRowBlockStructure* bs = block_diagonal_ff->block_structure();
 
   EXPECT_EQ(block_diagonal_ff->num_rows(), 3);
   EXPECT_EQ(block_diagonal_ff->num_cols(), 3);
diff --git a/internal/ceres/polynomial.h b/internal/ceres/polynomial.h
index 3e09bae..40e4150 100644
--- a/internal/ceres/polynomial.h
+++ b/internal/ceres/polynomial.h
@@ -33,6 +33,7 @@
 #define CERES_INTERNAL_POLYNOMIAL_SOLVER_H_
 
 #include <vector>
+
 #include "ceres/internal/eigen.h"
 #include "ceres/internal/port.h"
 
diff --git a/internal/ceres/polynomial_test.cc b/internal/ceres/polynomial_test.cc
index 00c8534..0ff73ea 100644
--- a/internal/ceres/polynomial_test.cc
+++ b/internal/ceres/polynomial_test.cc
@@ -31,13 +31,14 @@
 
 #include "ceres/polynomial.h"
 
-#include <limits>
+#include <algorithm>
 #include <cmath>
 #include <cstddef>
-#include <algorithm>
-#include "gtest/gtest.h"
+#include <limits>
+
 #include "ceres/function_sample.h"
 #include "ceres/test_util.h"
+#include "gtest/gtest.h"
 
 namespace ceres {
 namespace internal {
@@ -74,7 +75,7 @@
   // Multiply poly by x^2 - 2real + abs(real,imag)^2
   poly2.head(poly.size()) += poly;
   poly2.segment(1, poly.size()) -= 2 * real * poly;
-  poly2.tail(poly.size()) += (real*real + imag*imag) * poly;
+  poly2.tail(poly.size()) += (real * real + imag * imag) * poly;
   return poly2;
 }
 
@@ -90,7 +91,7 @@
 // If use_real is false, NULL is passed as the real argument to
 // FindPolynomialRoots. If use_imaginary is false, NULL is passed as the
 // imaginary argument to FindPolynomialRoots.
-template<int N>
+template <int N>
 void RunPolynomialTestRealRoots(const double (&real_roots)[N],
                                 bool use_real,
                                 bool use_imaginary,
@@ -142,32 +143,32 @@
 }
 
 TEST(Polynomial, LinearPolynomialWithPositiveRootWorks) {
-  const double roots[1] = { 42.42 };
+  const double roots[1] = {42.42};
   RunPolynomialTestRealRoots(roots, true, true, kEpsilon);
 }
 
 TEST(Polynomial, LinearPolynomialWithNegativeRootWorks) {
-  const double roots[1] = { -42.42 };
+  const double roots[1] = {-42.42};
   RunPolynomialTestRealRoots(roots, true, true, kEpsilon);
 }
 
 TEST(Polynomial, QuadraticPolynomialWithPositiveRootsWorks) {
-  const double roots[2] = { 1.0, 42.42 };
+  const double roots[2] = {1.0, 42.42};
   RunPolynomialTestRealRoots(roots, true, true, kEpsilon);
 }
 
 TEST(Polynomial, QuadraticPolynomialWithOneNegativeRootWorks) {
-  const double roots[2] = { -42.42, 1.0 };
+  const double roots[2] = {-42.42, 1.0};
   RunPolynomialTestRealRoots(roots, true, true, kEpsilon);
 }
 
 TEST(Polynomial, QuadraticPolynomialWithTwoNegativeRootsWorks) {
-  const double roots[2] = { -42.42, -1.0 };
+  const double roots[2] = {-42.42, -1.0};
   RunPolynomialTestRealRoots(roots, true, true, kEpsilon);
 }
 
 TEST(Polynomial, QuadraticPolynomialWithCloseRootsWorks) {
-  const double roots[2] = { 42.42, 42.43 };
+  const double roots[2] = {42.42, 42.43};
   RunPolynomialTestRealRoots(roots, true, false, kEpsilonLoose);
 }
 
@@ -190,37 +191,37 @@
 }
 
 TEST(Polynomial, QuarticPolynomialWorks) {
-  const double roots[4] = { 1.23e-4, 1.23e-1, 1.23e+2, 1.23e+5 };
+  const double roots[4] = {1.23e-4, 1.23e-1, 1.23e+2, 1.23e+5};
   RunPolynomialTestRealRoots(roots, true, true, kEpsilon);
 }
 
 TEST(Polynomial, QuarticPolynomialWithTwoClustersOfCloseRootsWorks) {
-  const double roots[4] = { 1.23e-1, 2.46e-1, 1.23e+5, 2.46e+5 };
+  const double roots[4] = {1.23e-1, 2.46e-1, 1.23e+5, 2.46e+5};
   RunPolynomialTestRealRoots(roots, true, true, kEpsilonLoose);
 }
 
 TEST(Polynomial, QuarticPolynomialWithTwoZeroRootsWorks) {
-  const double roots[4] = { -42.42, 0.0, 0.0, 42.42 };
+  const double roots[4] = {-42.42, 0.0, 0.0, 42.42};
   RunPolynomialTestRealRoots(roots, true, true, 2 * kEpsilonLoose);
 }
 
 TEST(Polynomial, QuarticMonomialWorks) {
-  const double roots[4] = { 0.0, 0.0, 0.0, 0.0 };
+  const double roots[4] = {0.0, 0.0, 0.0, 0.0};
   RunPolynomialTestRealRoots(roots, true, true, kEpsilon);
 }
 
 TEST(Polynomial, NullPointerAsImaginaryPartWorks) {
-  const double roots[4] = { 1.23e-4, 1.23e-1, 1.23e+2, 1.23e+5 };
+  const double roots[4] = {1.23e-4, 1.23e-1, 1.23e+2, 1.23e+5};
   RunPolynomialTestRealRoots(roots, true, false, kEpsilon);
 }
 
 TEST(Polynomial, NullPointerAsRealPartWorks) {
-  const double roots[4] = { 1.23e-4, 1.23e-1, 1.23e+2, 1.23e+5 };
+  const double roots[4] = {1.23e-4, 1.23e-1, 1.23e+2, 1.23e+5};
   RunPolynomialTestRealRoots(roots, false, true, kEpsilon);
 }
 
 TEST(Polynomial, BothOutputArgumentsNullWorks) {
-  const double roots[4] = { 1.23e-4, 1.23e-1, 1.23e+2, 1.23e+5 };
+  const double roots[4] = {1.23e-4, 1.23e-1, 1.23e+2, 1.23e+5};
   RunPolynomialTestRealRoots(roots, false, false, kEpsilon);
 }
 
@@ -279,7 +280,6 @@
   EXPECT_EQ(optimal_value, 2.0);
 }
 
-
 TEST(Polynomial, MinimizeQuadraticPolynomial) {
   // p(x) = x^2 - 3 x + 2
   // min_x = 3/2
@@ -294,8 +294,8 @@
   double min_x = -2.0;
   double max_x = 2.0;
   MinimizePolynomial(polynomial, min_x, max_x, &optimal_x, &optimal_value);
-  EXPECT_EQ(optimal_x, 3.0/2.0);
-  EXPECT_EQ(optimal_value, -1.0/4.0);
+  EXPECT_EQ(optimal_x, 3.0 / 2.0);
+  EXPECT_EQ(optimal_value, -1.0 / 4.0);
 
   min_x = -2.0;
   max_x = 1.0;
@@ -402,7 +402,6 @@
   EXPECT_NEAR((true_polynomial - polynomial).norm(), 0.0, 1e-14);
 }
 
-
 TEST(Polynomial, CubicInterpolatingPolynomialFromValues) {
   // p(x) = x^3 + 2x^2 + 3x + 2
   Vector true_polynomial(4);
diff --git a/internal/ceres/preconditioner.cc b/internal/ceres/preconditioner.cc
index f98374e..69ba04d 100644
--- a/internal/ceres/preconditioner.cc
+++ b/internal/ceres/preconditioner.cc
@@ -29,13 +29,13 @@
 // Author: sameeragarwal@google.com (Sameer Agarwal)
 
 #include "ceres/preconditioner.h"
+
 #include "glog/logging.h"
 
 namespace ceres {
 namespace internal {
 
-Preconditioner::~Preconditioner() {
-}
+Preconditioner::~Preconditioner() {}
 
 PreconditionerType Preconditioner::PreconditionerForZeroEBlocks(
     PreconditionerType preconditioner_type) {
@@ -53,8 +53,7 @@
   CHECK(matrix != nullptr);
 }
 
-SparseMatrixPreconditionerWrapper::~SparseMatrixPreconditionerWrapper() {
-}
+SparseMatrixPreconditionerWrapper::~SparseMatrixPreconditionerWrapper() {}
 
 bool SparseMatrixPreconditionerWrapper::UpdateImpl(const SparseMatrix& A,
                                                    const double* D) {
@@ -66,7 +65,7 @@
   matrix_->RightMultiply(x, y);
 }
 
-int  SparseMatrixPreconditionerWrapper::num_rows() const {
+int SparseMatrixPreconditionerWrapper::num_rows() const {
   return matrix_->num_rows();
 }
 
diff --git a/internal/ceres/preconditioner.h b/internal/ceres/preconditioner.h
index 3e46ed8..b10364b 100644
--- a/internal/ceres/preconditioner.h
+++ b/internal/ceres/preconditioner.h
@@ -32,6 +32,7 @@
 #define CERES_INTERNAL_PRECONDITIONER_H_
 
 #include <vector>
+
 #include "ceres/casts.h"
 #include "ceres/compressed_row_sparse_matrix.h"
 #include "ceres/context_impl.h"
@@ -50,7 +51,8 @@
   struct Options {
     PreconditionerType type = JACOBI;
     VisibilityClusteringType visibility_clustering_type = CANONICAL_VIEWS;
-    SparseLinearAlgebraLibraryType sparse_linear_algebra_library_type = SUITE_SPARSE;
+    SparseLinearAlgebraLibraryType sparse_linear_algebra_library_type =
+        SUITE_SPARSE;
 
     // When using the subset preconditioner, all row blocks starting
     // from this row block are used to construct the preconditioner.
@@ -137,9 +139,7 @@
   }
 
   int num_rows() const override = 0;
-  int num_cols() const override {
-    return num_rows();
-  }
+  int num_cols() const override { return num_rows(); }
 };
 
 // This templated subclass of Preconditioner serves as a base class for
@@ -159,9 +159,11 @@
 
 // Preconditioners that depend on access to the low level structure
 // of a SparseMatrix.
-typedef TypedPreconditioner<SparseMatrix>              SparseMatrixPreconditioner;               // NOLINT
-typedef TypedPreconditioner<BlockSparseMatrix>         BlockSparseMatrixPreconditioner;          // NOLINT
-typedef TypedPreconditioner<CompressedRowSparseMatrix> CompressedRowSparseMatrixPreconditioner;  // NOLINT
+// clang-format off
+typedef TypedPreconditioner<SparseMatrix>              SparseMatrixPreconditioner;
+typedef TypedPreconditioner<BlockSparseMatrix>         BlockSparseMatrixPreconditioner;
+typedef TypedPreconditioner<CompressedRowSparseMatrix> CompressedRowSparseMatrixPreconditioner;
+// clang-format on
 
 // Wrap a SparseMatrix object as a preconditioner.
 class SparseMatrixPreconditionerWrapper : public SparseMatrixPreconditioner {
diff --git a/internal/ceres/preprocessor.cc b/internal/ceres/preprocessor.cc
index 0221914..6a67d38 100644
--- a/internal/ceres/preprocessor.cc
+++ b/internal/ceres/preprocessor.cc
@@ -28,11 +28,12 @@
 //
 // Author: sameragarwal@google.com (Sameer Agarwal)
 
+#include "ceres/preprocessor.h"
+
 #include "ceres/callbacks.h"
 #include "ceres/gradient_checking_cost_function.h"
 #include "ceres/line_search_preprocessor.h"
 #include "ceres/parallel_for.h"
-#include "ceres/preprocessor.h"
 #include "ceres/problem_impl.h"
 #include "ceres/solver.h"
 #include "ceres/trust_region_preprocessor.h"
@@ -53,17 +54,15 @@
   return NULL;
 }
 
-Preprocessor::~Preprocessor() {
-}
+Preprocessor::~Preprocessor() {}
 
 void ChangeNumThreadsIfNeeded(Solver::Options* options) {
   const int num_threads_available = MaxNumThreadsAvailable();
   if (options->num_threads > num_threads_available) {
-    LOG(WARNING)
-        << "Specified options.num_threads: " << options->num_threads
-        << " exceeds maximum available from the threading model Ceres "
-        << "was compiled with: " << num_threads_available
-        << ".  Bounding to maximum number available.";
+    LOG(WARNING) << "Specified options.num_threads: " << options->num_threads
+                 << " exceeds maximum available from the threading model Ceres "
+                 << "was compiled with: " << num_threads_available
+                 << ".  Bounding to maximum number available.";
     options->num_threads = num_threads_available;
   }
 }
@@ -83,16 +82,15 @@
   minimizer_options.evaluator = pp->evaluator;
 
   if (options.logging_type != SILENT) {
-    pp->logging_callback.reset(
-        new LoggingCallback(options.minimizer_type,
-                            options.minimizer_progress_to_stdout));
+    pp->logging_callback.reset(new LoggingCallback(
+        options.minimizer_type, options.minimizer_progress_to_stdout));
     minimizer_options.callbacks.insert(minimizer_options.callbacks.begin(),
                                        pp->logging_callback.get());
   }
 
   if (options.update_state_every_iteration) {
     pp->state_updating_callback.reset(
-      new StateUpdatingCallback(program, reduced_parameters));
+        new StateUpdatingCallback(program, reduced_parameters));
     // This must get pushed to the front of the callbacks so that it
     // is run before any of the user callbacks.
     minimizer_options.callbacks.insert(minimizer_options.callbacks.begin(),
diff --git a/internal/ceres/preprocessor.h b/internal/ceres/preprocessor.h
index 99bd6c0..e69c790 100644
--- a/internal/ceres/preprocessor.h
+++ b/internal/ceres/preprocessor.h
@@ -80,9 +80,7 @@
 // A PreprocessedProblem is the result of running the Preprocessor on
 // a Problem and Solver::Options object.
 struct PreprocessedProblem {
-  PreprocessedProblem()
-      : fixed_cost(0.0) {
-  }
+  PreprocessedProblem() : fixed_cost(0.0) {}
 
   std::string error;
   Solver::Options options;
diff --git a/internal/ceres/problem_test.cc b/internal/ceres/problem_test.cc
index 9805891..5129b9a 100644
--- a/internal/ceres/problem_test.cc
+++ b/internal/ceres/problem_test.cc
@@ -48,8 +48,8 @@
 #include "ceres/sized_cost_function.h"
 #include "ceres/sparse_matrix.h"
 #include "ceres/types.h"
-#include "gtest/gtest.h"
 #include "gmock/gmock.h"
+#include "gtest/gtest.h"
 
 namespace ceres {
 namespace internal {
@@ -80,7 +80,7 @@
 };
 
 // Trivial cost function that accepts two arguments.
-class BinaryCostFunction: public CostFunction {
+class BinaryCostFunction : public CostFunction {
  public:
   BinaryCostFunction(int num_residuals,
                      int32_t parameter_block1_size,
@@ -101,7 +101,7 @@
 };
 
 // Trivial cost function that accepts three arguments.
-class TernaryCostFunction: public CostFunction {
+class TernaryCostFunction : public CostFunction {
  public:
   TernaryCostFunction(int num_residuals,
                       int32_t parameter_block1_size,
@@ -123,7 +123,6 @@
   }
 };
 
-
 TEST(Problem, MoveConstructor) {
   Problem src;
   double x;
@@ -172,23 +171,23 @@
 
   Problem problem;
   problem.AddResidualBlock(new UnaryCostFunction(2, 3), NULL, x);
-  EXPECT_DEATH_IF_SUPPORTED(problem.AddResidualBlock(
-                                new UnaryCostFunction(
-                                    2, 4 /* 4 != 3 */), NULL, x),
-                            "different block sizes");
+  EXPECT_DEATH_IF_SUPPORTED(
+      problem.AddResidualBlock(
+          new UnaryCostFunction(2, 4 /* 4 != 3 */), NULL, x),
+      "different block sizes");
 }
 
 TEST(Problem, AddResidualWithDuplicateParametersDies) {
   double x[3], z[5];
 
   Problem problem;
-  EXPECT_DEATH_IF_SUPPORTED(problem.AddResidualBlock(
-                                new BinaryCostFunction(2, 3, 3), NULL, x, x),
-                            "Duplicate parameter blocks");
-  EXPECT_DEATH_IF_SUPPORTED(problem.AddResidualBlock(
-                                new TernaryCostFunction(1, 5, 3, 5),
-                                NULL, z, x, z),
-                            "Duplicate parameter blocks");
+  EXPECT_DEATH_IF_SUPPORTED(
+      problem.AddResidualBlock(new BinaryCostFunction(2, 3, 3), NULL, x, x),
+      "Duplicate parameter blocks");
+  EXPECT_DEATH_IF_SUPPORTED(
+      problem.AddResidualBlock(
+          new TernaryCostFunction(1, 5, 3, 5), NULL, z, x, z),
+      "Duplicate parameter blocks");
 }
 
 TEST(Problem, AddResidualWithIncorrectSizesOfParameterBlockDies) {
@@ -201,9 +200,9 @@
 
   // The cost function expects the size of the second parameter, z, to be 4
   // instead of 5 as declared above. This is fatal.
-  EXPECT_DEATH_IF_SUPPORTED(problem.AddResidualBlock(
-      new BinaryCostFunction(2, 3, 4), NULL, x, z),
-               "different block sizes");
+  EXPECT_DEATH_IF_SUPPORTED(
+      problem.AddResidualBlock(new BinaryCostFunction(2, 3, 4), NULL, x, z),
+      "different block sizes");
 }
 
 TEST(Problem, AddResidualAddsDuplicatedParametersOnlyOnce) {
@@ -230,7 +229,7 @@
                             "different block sizes");
 }
 
-static double *IntToPtr(int i) {
+static double* IntToPtr(int i) {
   return reinterpret_cast<double*>(sizeof(double) * i);  // NOLINT
 }
 
@@ -246,16 +245,16 @@
   // ones marked with o==o and aliasing ones marked with o--o.
 
   Problem problem;
-  problem.AddParameterBlock(IntToPtr(5),  5);  // x
+  problem.AddParameterBlock(IntToPtr(5), 5);   // x
   problem.AddParameterBlock(IntToPtr(13), 3);  // y
 
-  EXPECT_DEATH_IF_SUPPORTED(problem.AddParameterBlock(IntToPtr( 4), 2),
+  EXPECT_DEATH_IF_SUPPORTED(problem.AddParameterBlock(IntToPtr(4), 2),
                             "Aliasing detected");
-  EXPECT_DEATH_IF_SUPPORTED(problem.AddParameterBlock(IntToPtr( 4), 3),
+  EXPECT_DEATH_IF_SUPPORTED(problem.AddParameterBlock(IntToPtr(4), 3),
                             "Aliasing detected");
-  EXPECT_DEATH_IF_SUPPORTED(problem.AddParameterBlock(IntToPtr( 4), 9),
+  EXPECT_DEATH_IF_SUPPORTED(problem.AddParameterBlock(IntToPtr(4), 9),
                             "Aliasing detected");
-  EXPECT_DEATH_IF_SUPPORTED(problem.AddParameterBlock(IntToPtr( 8), 3),
+  EXPECT_DEATH_IF_SUPPORTED(problem.AddParameterBlock(IntToPtr(8), 3),
                             "Aliasing detected");
   EXPECT_DEATH_IF_SUPPORTED(problem.AddParameterBlock(IntToPtr(12), 2),
                             "Aliasing detected");
@@ -263,7 +262,7 @@
                             "Aliasing detected");
 
   // These ones should work.
-  problem.AddParameterBlock(IntToPtr( 2), 3);
+  problem.AddParameterBlock(IntToPtr(2), 3);
   problem.AddParameterBlock(IntToPtr(10), 3);
   problem.AddParameterBlock(IntToPtr(16), 2);
 
@@ -306,17 +305,20 @@
   EXPECT_EQ(7, problem.NumParameters());
 
   problem.AddParameterBlock(z, 5);
-  EXPECT_EQ(3,  problem.NumParameterBlocks());
+  EXPECT_EQ(3, problem.NumParameterBlocks());
   EXPECT_EQ(12, problem.NumParameters());
 
   // Add a parameter that has a local parameterization.
-  w[0] = 1.0; w[1] = 0.0; w[2] = 0.0; w[3] = 0.0;
+  w[0] = 1.0;
+  w[1] = 0.0;
+  w[2] = 0.0;
+  w[3] = 0.0;
   problem.AddParameterBlock(w, 4, new QuaternionParameterization);
-  EXPECT_EQ(4,  problem.NumParameterBlocks());
+  EXPECT_EQ(4, problem.NumParameterBlocks());
   EXPECT_EQ(16, problem.NumParameters());
 
   problem.AddResidualBlock(new UnaryCostFunction(2, 3), NULL, x);
-  problem.AddResidualBlock(new BinaryCostFunction(6, 5, 4) , NULL, z, y);
+  problem.AddResidualBlock(new BinaryCostFunction(6, 5, 4), NULL, z, y);
   problem.AddResidualBlock(new BinaryCostFunction(3, 3, 5), NULL, x, z);
   problem.AddResidualBlock(new BinaryCostFunction(7, 5, 3), NULL, z, x);
   problem.AddResidualBlock(new TernaryCostFunction(1, 5, 3, 4), NULL, z, x, y);
@@ -328,12 +330,10 @@
 
 class DestructorCountingCostFunction : public SizedCostFunction<3, 4, 5> {
  public:
-  explicit DestructorCountingCostFunction(int *num_destructions)
+  explicit DestructorCountingCostFunction(int* num_destructions)
       : num_destructions_(num_destructions) {}
 
-  virtual ~DestructorCountingCostFunction() {
-    *num_destructions_ += 1;
-  }
+  virtual ~DestructorCountingCostFunction() { *num_destructions_ += 1; }
 
   bool Evaluate(double const* const* parameters,
                 double* residuals,
@@ -463,8 +463,7 @@
   // The next block of functions until the end are only for testing the
   // residual block removals.
   void ExpectParameterBlockContainsResidualBlock(
-      double* values,
-      ResidualBlock* residual_block) {
+      double* values, ResidualBlock* residual_block) {
     ParameterBlock* parameter_block =
         FindOrDie(problem->parameter_map(), values);
     EXPECT_TRUE(ContainsKey(*(parameter_block->mutable_residual_blocks()),
@@ -478,12 +477,9 @@
   }
 
   // Degenerate case.
-  void ExpectParameterBlockContains(double* values) {
-    ExpectSize(values, 0);
-  }
+  void ExpectParameterBlockContains(double* values) { ExpectSize(values, 0); }
 
-  void ExpectParameterBlockContains(double* values,
-                                    ResidualBlock* r1) {
+  void ExpectParameterBlockContains(double* values, ResidualBlock* r1) {
     ExpectSize(values, 1);
     ExpectParameterBlockContainsResidualBlock(values, r1);
   }
@@ -598,8 +594,8 @@
   Problem problem;
   problem.AddParameterBlock(x, 3);
 
-  EXPECT_DEATH_IF_SUPPORTED(
-      problem.RemoveParameterBlock(y), "Parameter block not found:");
+  EXPECT_DEATH_IF_SUPPORTED(problem.RemoveParameterBlock(y),
+                            "Parameter block not found:");
 }
 
 TEST(Problem, GetParameterization) {
@@ -610,7 +606,7 @@
   problem.AddParameterBlock(x, 3);
   problem.AddParameterBlock(y, 2);
 
-  LocalParameterization* parameterization =  new IdentityParameterization(3);
+  LocalParameterization* parameterization = new IdentityParameterization(3);
   problem.SetParameterization(x, parameterization);
   EXPECT_EQ(problem.GetParameterization(x), parameterization);
   EXPECT_TRUE(problem.GetParameterization(y) == NULL);
@@ -626,8 +622,7 @@
   vector<int> constant_parameters;
   constant_parameters.push_back(0);
   problem.SetParameterization(
-      x,
-      new SubsetParameterization(3, constant_parameters));
+      x, new SubsetParameterization(3, constant_parameters));
   EXPECT_EQ(problem.ParameterBlockSize(x), 3);
   EXPECT_EQ(problem.ParameterBlockLocalSize(x), 2);
   EXPECT_EQ(problem.ParameterBlockLocalSize(y), 4);
@@ -714,6 +709,8 @@
   EXPECT_EQ(z, GetParameterBlock(1)->user_state());
   EXPECT_EQ(w, GetParameterBlock(2)->user_state());
 
+  // clang-format off
+
   // Add all combinations of cost functions.
   CostFunction* cost_yzw = new TernaryCostFunction(1, 4, 5, 3);
   CostFunction* cost_yz  = new BinaryCostFunction (1, 4, 5);
@@ -764,6 +761,8 @@
   problem->RemoveParameterBlock(y);
   EXPECT_EQ(0, problem->NumParameterBlocks());
   EXPECT_EQ(0, NumResidualBlocks());
+
+  // clang-format on
 }
 
 TEST_P(DynamicProblem, RemoveResidualBlock) {
@@ -771,6 +770,8 @@
   problem->AddParameterBlock(z, 5);
   problem->AddParameterBlock(w, 3);
 
+  // clang-format off
+
   // Add all combinations of cost functions.
   CostFunction* cost_yzw = new TernaryCostFunction(1, 4, 5, 3);
   CostFunction* cost_yz  = new BinaryCostFunction (1, 4, 5);
@@ -885,6 +886,8 @@
     ExpectParameterBlockContains(z);
     ExpectParameterBlockContains(w);
   }
+
+  // clang-format on
 }
 
 TEST_P(DynamicProblem, RemoveInvalidResidualBlockDies) {
@@ -892,6 +895,8 @@
   problem->AddParameterBlock(z, 5);
   problem->AddParameterBlock(w, 3);
 
+  // clang-format off
+
   // Add all combinations of cost functions.
   CostFunction* cost_yzw = new TernaryCostFunction(1, 4, 5, 3);
   CostFunction* cost_yz  = new BinaryCostFunction (1, 4, 5);
@@ -909,6 +914,8 @@
   ResidualBlock* r_z   = problem->AddResidualBlock(cost_z,   NULL, z);
   ResidualBlock* r_w   = problem->AddResidualBlock(cost_w,   NULL, w);
 
+  // clang-format on
+
   // Remove r_yzw.
   problem->RemoveResidualBlock(r_yzw);
   ASSERT_EQ(3, problem->NumParameterBlocks());
@@ -938,7 +945,7 @@
 }
 
 // Check that a null-terminated array, a, has the same elements as b.
-template<typename T>
+template <typename T>
 void ExpectVectorContainsUnordered(const T* a, const vector<T>& b) {
   // Compute the size of a.
   int size = 0;
@@ -963,8 +970,8 @@
 }
 
 static void ExpectProblemHasResidualBlocks(
-    const ProblemImpl &problem,
-    const ResidualBlockId *expected_residual_blocks) {
+    const ProblemImpl& problem,
+    const ResidualBlockId* expected_residual_blocks) {
   vector<ResidualBlockId> residual_blocks;
   problem.GetResidualBlocks(&residual_blocks);
   ExpectVectorContainsUnordered(expected_residual_blocks, residual_blocks);
@@ -975,6 +982,8 @@
   problem->AddParameterBlock(z, 5);
   problem->AddParameterBlock(w, 3);
 
+  // clang-format off
+
   // Add all combinations of cost functions.
   CostFunction* cost_yzw = new TernaryCostFunction(1, 4, 5, 3);
   CostFunction* cost_yz  = new BinaryCostFunction (1, 4, 5);
@@ -1070,6 +1079,8 @@
         get_parameter_blocks_cases[i].expected_parameter_blocks,
         parameter_blocks);
   }
+
+  // clang-format on
 }
 
 INSTANTIATE_TEST_SUITE_P(OptionsInstantiation,
@@ -1108,8 +1119,8 @@
     for (int j = 0; j < kNumParameterBlocks; ++j) {
       if (jacobians[j] != NULL) {
         MatrixRef(jacobians[j], kNumResiduals, kNumResiduals) =
-            (-2.0 * (j + 1.0) *
-             ConstVectorRef(parameters[j], kNumResiduals)).asDiagonal();
+            (-2.0 * (j + 1.0) * ConstVectorRef(parameters[j], kNumResiduals))
+                .asDiagonal();
       }
     }
 
@@ -1142,31 +1153,20 @@
     parameter_blocks_.push_back(parameters_ + 2);
     parameter_blocks_.push_back(parameters_ + 4);
 
-
     CostFunction* cost_function = new QuadraticCostFunction<2, 2>;
 
     // f(x, y)
-    residual_blocks_.push_back(
-        problem_.AddResidualBlock(cost_function,
-                                  NULL,
-                                  parameters_,
-                                  parameters_ + 2));
+    residual_blocks_.push_back(problem_.AddResidualBlock(
+        cost_function, NULL, parameters_, parameters_ + 2));
     // g(y, z)
-    residual_blocks_.push_back(
-        problem_.AddResidualBlock(cost_function,
-                                  NULL, parameters_ + 2,
-                                  parameters_ + 4));
+    residual_blocks_.push_back(problem_.AddResidualBlock(
+        cost_function, NULL, parameters_ + 2, parameters_ + 4));
     // h(z, x)
-    residual_blocks_.push_back(
-        problem_.AddResidualBlock(cost_function,
-                                  NULL,
-                                  parameters_ + 4,
-                                  parameters_));
+    residual_blocks_.push_back(problem_.AddResidualBlock(
+        cost_function, NULL, parameters_ + 4, parameters_));
   }
 
-  void TearDown() {
-    EXPECT_TRUE(problem_.program().IsValid());
-  }
+  void TearDown() { EXPECT_TRUE(problem_.program().IsValid()); }
 
   void EvaluateAndCompare(const Problem::EvaluateOptions& options,
                           const int expected_num_rows,
@@ -1225,8 +1225,8 @@
                          expected.num_cols,
                          expected.cost,
                          (i & 1) ? expected.residuals : NULL,
-                         (i & 2) ? expected.gradient  : NULL,
-                         (i & 4) ? expected.jacobian  : NULL);
+                         (i & 2) ? expected.gradient : NULL,
+                         (i & 4) ? expected.jacobian : NULL);
     }
   }
 
@@ -1236,8 +1236,8 @@
   vector<ResidualBlockId> residual_blocks_;
 };
 
-
 TEST_F(ProblemEvaluateTest, MultipleParameterAndResidualBlocks) {
+  // clang-format off
   ExpectedEvaluation expected = {
     // Rows/columns
     6, 6,
@@ -1263,11 +1263,13 @@
                      0.0, -8.0,   0.0,   0.0,   0.0, -12.0
     }
   };
+  // clang-format on
 
   CheckAllEvaluationCombinations(Problem::EvaluateOptions(), expected);
 }
 
 TEST_F(ProblemEvaluateTest, ParameterAndResidualBlocksPassedInOptions) {
+  // clang-format off
   ExpectedEvaluation expected = {
     // Rows/columns
     6, 6,
@@ -1293,6 +1295,7 @@
                      0.0, -8.0,   0.0,   0.0,   0.0, -12.0
     }
   };
+  // clang-format on
 
   Problem::EvaluateOptions evaluate_options;
   evaluate_options.parameter_blocks = parameter_blocks_;
@@ -1301,6 +1304,7 @@
 }
 
 TEST_F(ProblemEvaluateTest, ReorderedResidualBlocks) {
+  // clang-format off
   ExpectedEvaluation expected = {
     // Rows/columns
     6, 6,
@@ -1326,6 +1330,7 @@
                      0.0,  0.0,   0.0,  -8.0,   0.0, -24.0
     }
   };
+  // clang-format on
 
   Problem::EvaluateOptions evaluate_options;
   evaluate_options.parameter_blocks = parameter_blocks_;
@@ -1338,7 +1343,9 @@
   CheckAllEvaluationCombinations(evaluate_options, expected);
 }
 
-TEST_F(ProblemEvaluateTest, ReorderedResidualBlocksAndReorderedParameterBlocks) {
+TEST_F(ProblemEvaluateTest,
+       ReorderedResidualBlocksAndReorderedParameterBlocks) {
+  // clang-format off
   ExpectedEvaluation expected = {
     // Rows/columns
     6, 6,
@@ -1364,6 +1371,7 @@
                       0.0, -24.0,   0.0,  -8.0,   0.0,   0.0
     }
   };
+  // clang-format on
 
   Problem::EvaluateOptions evaluate_options;
   // z, y, x
@@ -1380,6 +1388,7 @@
 }
 
 TEST_F(ProblemEvaluateTest, ConstantParameterBlock) {
+  // clang-format off
   ExpectedEvaluation expected = {
     // Rows/columns
     6, 6,
@@ -1407,12 +1416,14 @@
                      0.0, -8.0,   0.0,   0.0,   0.0, -12.0
     }
   };
+  // clang-format on
 
   problem_.SetParameterBlockConstant(parameters_ + 2);
   CheckAllEvaluationCombinations(Problem::EvaluateOptions(), expected);
 }
 
 TEST_F(ProblemEvaluateTest, ExcludedAResidualBlock) {
+  // clang-format off
   ExpectedEvaluation expected = {
     // Rows/columns
     4, 6,
@@ -1435,6 +1446,7 @@
                      0.0, -8.0,   0.0,   0.0,   0.0, -12.0
     }
   };
+  // clang-format on
 
   Problem::EvaluateOptions evaluate_options;
   evaluate_options.residual_blocks.push_back(residual_blocks_[0]);
@@ -1444,6 +1456,7 @@
 }
 
 TEST_F(ProblemEvaluateTest, ExcludedParameterBlock) {
+  // clang-format off
   ExpectedEvaluation expected = {
     // Rows/columns
     6, 4,
@@ -1470,6 +1483,7 @@
                      0.0, -8.0,   0.0, -12.0
     }
   };
+  // clang-format on
 
   Problem::EvaluateOptions evaluate_options;
   // x, z
@@ -1480,6 +1494,7 @@
 }
 
 TEST_F(ProblemEvaluateTest, ExcludedParameterBlockAndExcludedResidualBlock) {
+  // clang-format off
   ExpectedEvaluation expected = {
     // Rows/columns
     4, 4,
@@ -1503,6 +1518,7 @@
                      0.0,  0.0,   0.0, -24.0,
     }
   };
+  // clang-format on
 
   Problem::EvaluateOptions evaluate_options;
   // x, z
@@ -1515,6 +1531,7 @@
 }
 
 TEST_F(ProblemEvaluateTest, LocalParameterization) {
+  // clang-format off
   ExpectedEvaluation expected = {
     // Rows/columns
     6, 5,
@@ -1540,12 +1557,12 @@
                      0.0, -8.0,   0.0,   0.0, -12.0
     }
   };
+  // clang-format on
 
   vector<int> constant_parameters;
   constant_parameters.push_back(0);
-  problem_.SetParameterization(parameters_ + 2,
-                               new SubsetParameterization(2,
-                                                          constant_parameters));
+  problem_.SetParameterization(
+      parameters_ + 2, new SubsetParameterization(2, constant_parameters));
 
   CheckAllEvaluationCombinations(Problem::EvaluateOptions(), expected);
 }
diff --git a/internal/ceres/program.h b/internal/ceres/program.h
index 7971299..c4935e3 100644
--- a/internal/ceres/program.h
+++ b/internal/ceres/program.h
@@ -36,8 +36,8 @@
 #include <string>
 #include <vector>
 
-#include "ceres/internal/port.h"
 #include "ceres/evaluation_callback.h"
+#include "ceres/internal/port.h"
 
 namespace ceres {
 namespace internal {
diff --git a/internal/ceres/program_evaluator.h b/internal/ceres/program_evaluator.h
index 97ee590..36c9c64 100644
--- a/internal/ceres/program_evaluator.h
+++ b/internal/ceres/program_evaluator.h
@@ -80,7 +80,9 @@
 #define CERES_INTERNAL_PROGRAM_EVALUATOR_H_
 
 // This include must come before any #ifndef check on Ceres compile options.
+// clang-format off
 #include "ceres/internal/port.h"
+// clang-format on
 
 #include <atomic>
 #include <map>
diff --git a/internal/ceres/program_test.cc b/internal/ceres/program_test.cc
index be6ad87..1d9f49c 100644
--- a/internal/ceres/program_test.cc
+++ b/internal/ceres/program_test.cc
@@ -99,7 +99,8 @@
   vector<double*> removed_parameter_blocks;
   double fixed_cost = 0.0;
   string message;
-  std::unique_ptr<Program> reduced_program(problem.program().CreateReducedProgram(
+  std::unique_ptr<Program> reduced_program(
+      problem.program().CreateReducedProgram(
           &removed_parameter_blocks, &fixed_cost, &message));
 
   EXPECT_EQ(reduced_program->NumParameterBlocks(), 3);
@@ -130,7 +131,6 @@
   EXPECT_EQ(fixed_cost, 9.0);
 }
 
-
 TEST(Program, RemoveFixedBlocksNoResidualBlocks) {
   ProblemImpl problem;
   double x;
@@ -215,17 +215,13 @@
   problem.AddResidualBlock(new BinaryCostFunction(), nullptr, &x, &y);
   problem.SetParameterBlockConstant(&x);
 
-  ResidualBlock *expected_removed_block =
+  ResidualBlock* expected_removed_block =
       problem.program().residual_blocks()[0];
   std::unique_ptr<double[]> scratch(
       new double[expected_removed_block->NumScratchDoublesForEvaluate()]);
   double expected_fixed_cost;
-  expected_removed_block->Evaluate(true,
-                                   &expected_fixed_cost,
-                                   nullptr,
-                                   nullptr,
-                                   scratch.get());
-
+  expected_removed_block->Evaluate(
+      true, &expected_fixed_cost, nullptr, nullptr, scratch.get());
 
   vector<double*> removed_parameter_blocks;
   double fixed_cost = 0.0;
@@ -323,9 +319,7 @@
   EXPECT_EQ((expected_dense_jacobian - actual_dense_jacobian).norm(), 0.0);
 }
 
-INSTANTIATE_TEST_SUITE_P(AllColumns,
-                         BlockJacobianTest,
-                         ::testing::Range(0, 7));
+INSTANTIATE_TEST_SUITE_P(AllColumns, BlockJacobianTest, ::testing::Range(0, 7));
 
 template <int kNumResiduals, int kNumParameterBlocks>
 class NumParameterBlocksCostFunction : public CostFunction {
@@ -337,8 +331,7 @@
     }
   }
 
-  virtual ~NumParameterBlocksCostFunction() {
-  }
+  virtual ~NumParameterBlocksCostFunction() {}
 
   bool Evaluate(double const* const* parameters,
                 double* residuals,
@@ -403,8 +396,8 @@
   problem.AddResidualBlock(new MockCostFunctionBase<1, 2>(), nullptr, x);
   string error;
   EXPECT_FALSE(problem.program().ParameterBlocksAreFinite(&error));
-  EXPECT_NE(error.find("has at least one invalid value"),
-            string::npos) << error;
+  EXPECT_NE(error.find("has at least one invalid value"), string::npos)
+      << error;
 }
 
 TEST(Program, InfeasibleParameterBlock) {
diff --git a/internal/ceres/random.h b/internal/ceres/random.h
index 87d9d77..6b280f9 100644
--- a/internal/ceres/random.h
+++ b/internal/ceres/random.h
@@ -34,13 +34,12 @@
 
 #include <cmath>
 #include <cstdlib>
+
 #include "ceres/internal/port.h"
 
 namespace ceres {
 
-inline void SetRandomState(int state) {
-  srand(state);
-}
+inline void SetRandomState(int state) { srand(state); }
 
 inline int Uniform(int n) {
   if (n) {
@@ -63,7 +62,7 @@
     x1 = 2.0 * RandDouble() - 1.0;
     x2 = 2.0 * RandDouble() - 1.0;
     w = x1 * x1 + x2 * x2;
-  } while ( w >= 1.0 || w == 0.0 );
+  } while (w >= 1.0 || w == 0.0);
 
   w = sqrt((-2.0 * log(w)) / w);
   return x1 * w;
diff --git a/internal/ceres/reorder_program.cc b/internal/ceres/reorder_program.cc
index aa6032a..5d80236 100644
--- a/internal/ceres/reorder_program.cc
+++ b/internal/ceres/reorder_program.cc
@@ -35,6 +35,7 @@
 #include <numeric>
 #include <vector>
 
+#include "Eigen/SparseCore"
 #include "ceres/cxsparse.h"
 #include "ceres/internal/port.h"
 #include "ceres/ordered_groups.h"
@@ -47,7 +48,6 @@
 #include "ceres/suitesparse.h"
 #include "ceres/triplet_sparse_matrix.h"
 #include "ceres/types.h"
-#include "Eigen/SparseCore"
 
 #ifdef CERES_USE_EIGEN_SPARSE
 #include "Eigen/OrderingMethods"
@@ -78,8 +78,8 @@
       CHECK_NE(parameter_block->index(), -1)
           << "Did you forget to call Program::SetParameterOffsetsAndIndex()? "
           << "This is a Ceres bug; please contact the developers!";
-      min_parameter_block_position = std::min(parameter_block->index(),
-                                              min_parameter_block_position);
+      min_parameter_block_position =
+          std::min(parameter_block->index(), min_parameter_block_position);
     }
   }
   return min_parameter_block_position;
@@ -117,9 +117,8 @@
              << "Please report this error to the developers.";
 #else
   SuiteSparse ss;
-  cholmod_sparse* block_jacobian_transpose =
-      ss.CreateSparseMatrix(
-          const_cast<TripletSparseMatrix*>(&tsm_block_jacobian_transpose));
+  cholmod_sparse* block_jacobian_transpose = ss.CreateSparseMatrix(
+      const_cast<TripletSparseMatrix*>(&tsm_block_jacobian_transpose));
 
   // No CAMD or the user did not supply a useful ordering, then just
   // use regular AMD.
@@ -129,18 +128,16 @@
   } else {
     vector<int> constraints;
     for (int i = 0; i < parameter_blocks.size(); ++i) {
-      constraints.push_back(
-          parameter_block_ordering.GroupId(
-              parameter_blocks[i]->mutable_user_state()));
+      constraints.push_back(parameter_block_ordering.GroupId(
+          parameter_blocks[i]->mutable_user_state()));
     }
 
     // Renumber the entries of constraints to be contiguous integers
     // as CAMD requires that the group ids be in the range [0,
     // parameter_blocks.size() - 1].
     MapValuesToContiguousRange(constraints.size(), &constraints[0]);
-    ss.ConstrainedApproximateMinimumDegreeOrdering(block_jacobian_transpose,
-                                                   &constraints[0],
-                                                   ordering);
+    ss.ConstrainedApproximateMinimumDegreeOrdering(
+        block_jacobian_transpose, &constraints[0], ordering);
   }
 
   VLOG(2) << "Block ordering stats: "
@@ -153,20 +150,18 @@
 }
 
 void OrderingForSparseNormalCholeskyUsingCXSparse(
-    const TripletSparseMatrix& tsm_block_jacobian_transpose,
-    int* ordering) {
+    const TripletSparseMatrix& tsm_block_jacobian_transpose, int* ordering) {
 #ifdef CERES_NO_CXSPARSE
   LOG(FATAL) << "Congratulations, you found a Ceres bug! "
              << "Please report this error to the developers.";
-#else  // CERES_NO_CXSPARSE
+#else
   // CXSparse works with J'J instead of J'. So compute the block
   // sparsity for J'J and compute an approximate minimum degree
   // ordering.
   CXSparse cxsparse;
   cs_di* block_jacobian_transpose;
-  block_jacobian_transpose =
-      cxsparse.CreateSparseMatrix(
-            const_cast<TripletSparseMatrix*>(&tsm_block_jacobian_transpose));
+  block_jacobian_transpose = cxsparse.CreateSparseMatrix(
+      const_cast<TripletSparseMatrix*>(&tsm_block_jacobian_transpose));
   cs_di* block_jacobian = cxsparse.TransposeMatrix(block_jacobian_transpose);
   cs_di* block_hessian =
       cxsparse.MatrixMatrixMultiply(block_jacobian_transpose, block_jacobian);
@@ -178,16 +173,13 @@
 #endif  // CERES_NO_CXSPARSE
 }
 
-
 void OrderingForSparseNormalCholeskyUsingEigenSparse(
-    const TripletSparseMatrix& tsm_block_jacobian_transpose,
-    int* ordering) {
+    const TripletSparseMatrix& tsm_block_jacobian_transpose, int* ordering) {
 #ifndef CERES_USE_EIGEN_SPARSE
-  LOG(FATAL) <<
-      "SPARSE_NORMAL_CHOLESKY cannot be used with EIGEN_SPARSE "
-      "because Ceres was not built with support for "
-      "Eigen's SimplicialLDLT decomposition. "
-      "This requires enabling building with -DEIGENSPARSE=ON.";
+  LOG(FATAL) << "SPARSE_NORMAL_CHOLESKY cannot be used with EIGEN_SPARSE "
+                "because Ceres was not built with support for "
+                "Eigen's SimplicialLDLT decomposition. "
+                "This requires enabling building with -DEIGENSPARSE=ON.";
 #else
 
   // This conversion from a TripletSparseMatrix to a Eigen::Triplet
@@ -218,13 +210,14 @@
                    const ParameterBlockOrdering& ordering,
                    Program* program,
                    string* error) {
-  const int num_parameter_blocks =  program->NumParameterBlocks();
+  const int num_parameter_blocks = program->NumParameterBlocks();
   if (ordering.NumElements() != num_parameter_blocks) {
-    *error = StringPrintf("User specified ordering does not have the same "
-                          "number of parameters as the problem. The problem"
-                          "has %d blocks while the ordering has %d blocks.",
-                          num_parameter_blocks,
-                          ordering.NumElements());
+    *error = StringPrintf(
+        "User specified ordering does not have the same "
+        "number of parameters as the problem. The problem"
+        "has %d blocks while the ordering has %d blocks.",
+        num_parameter_blocks,
+        ordering.NumElements());
     return false;
   }
 
@@ -238,10 +231,11 @@
     for (double* parameter_block_ptr : group) {
       auto it = parameter_map.find(parameter_block_ptr);
       if (it == parameter_map.end()) {
-        *error = StringPrintf("User specified ordering contains a pointer "
-                              "to a double that is not a parameter block in "
-                              "the problem. The invalid double is in group: %d",
-                              p.first);
+        *error = StringPrintf(
+            "User specified ordering contains a pointer "
+            "to a double that is not a parameter block in "
+            "the problem. The invalid double is in group: %d",
+            p.first);
         return false;
       }
       parameter_blocks->push_back(it->second);
@@ -265,8 +259,8 @@
   vector<int> min_position_per_residual(residual_blocks->size());
   for (int i = 0; i < residual_blocks->size(); ++i) {
     ResidualBlock* residual_block = (*residual_blocks)[i];
-    int position = MinParameterBlock(residual_block,
-                                     size_of_first_elimination_group);
+    int position =
+        MinParameterBlock(residual_block, size_of_first_elimination_group);
     min_position_per_residual[i] = position;
     DCHECK_LE(position, size_of_first_elimination_group);
     residual_blocks_per_e_block[position]++;
@@ -284,8 +278,8 @@
       << "to the developers.";
 
   CHECK(find(residual_blocks_per_e_block.begin(),
-             residual_blocks_per_e_block.end() - 1, 0) !=
-        residual_blocks_per_e_block.end())
+             residual_blocks_per_e_block.end() - 1,
+             0) != residual_blocks_per_e_block.end())
       << "Congratulations, you found a Ceres bug! Please report this error "
       << "to the developers.";
 
@@ -334,8 +328,7 @@
 // Pre-order the columns corresponding to the schur complement if
 // possible.
 static void MaybeReorderSchurComplementColumnsUsingSuiteSparse(
-    const ParameterBlockOrdering& parameter_block_ordering,
-    Program* program) {
+    const ParameterBlockOrdering& parameter_block_ordering, Program* program) {
 #ifndef CERES_NO_SUITESPARSE
   SuiteSparse ss;
   if (!SuiteSparse::IsConstrainedApproximateMinimumDegreeOrderingAvailable()) {
@@ -347,9 +340,8 @@
       *(program->mutable_parameter_blocks());
 
   for (int i = 0; i < parameter_blocks.size(); ++i) {
-    constraints.push_back(
-        parameter_block_ordering.GroupId(
-            parameter_blocks[i]->mutable_user_state()));
+    constraints.push_back(parameter_block_ordering.GroupId(
+        parameter_blocks[i]->mutable_user_state()));
   }
 
   // Renumber the entries of constraints to be contiguous integers as
@@ -365,9 +357,8 @@
       ss.CreateSparseMatrix(tsm_block_jacobian_transpose.get());
 
   vector<int> ordering(parameter_blocks.size(), 0);
-  ss.ConstrainedApproximateMinimumDegreeOrdering(block_jacobian_transpose,
-                                                 &constraints[0],
-                                                 &ordering[0]);
+  ss.ConstrainedApproximateMinimumDegreeOrdering(
+      block_jacobian_transpose, &constraints[0], &ordering[0]);
   ss.Free(block_jacobian_transpose);
 
   const vector<ParameterBlock*> parameter_blocks_copy(parameter_blocks);
@@ -396,10 +387,7 @@
   // Vertically partition the jacobian in parameter blocks of type E
   // and F.
   const SparseMatrix E =
-      block_jacobian.block(0,
-                           0,
-                           num_rows,
-                           size_of_first_elimination_group);
+      block_jacobian.block(0, 0, num_rows, size_of_first_elimination_group);
   const SparseMatrix F =
       block_jacobian.block(0,
                            size_of_first_elimination_group,
@@ -482,22 +470,17 @@
 
     // Verify that the first elimination group is an independent set.
     const set<double*>& first_elimination_group =
-        parameter_block_ordering
-        ->group_to_elements()
-        .begin()
-        ->second;
+        parameter_block_ordering->group_to_elements().begin()->second;
     if (!program->IsParameterBlockSetIndependent(first_elimination_group)) {
-      *error =
-          StringPrintf("The first elimination group in the parameter block "
-                       "ordering of size %zd is not an independent set",
-                       first_elimination_group.size());
+      *error = StringPrintf(
+          "The first elimination group in the parameter block "
+          "ordering of size %zd is not an independent set",
+          first_elimination_group.size());
       return false;
     }
 
-    if (!ApplyOrdering(parameter_map,
-                       *parameter_block_ordering,
-                       program,
-                       error)) {
+    if (!ApplyOrdering(
+            parameter_map, *parameter_block_ordering, program, error)) {
       return false;
     }
   }
@@ -510,13 +493,10 @@
   if (linear_solver_type == SPARSE_SCHUR) {
     if (sparse_linear_algebra_library_type == SUITE_SPARSE) {
       MaybeReorderSchurComplementColumnsUsingSuiteSparse(
-          *parameter_block_ordering,
-          program);
+          *parameter_block_ordering, program);
     } else if (sparse_linear_algebra_library_type == EIGEN_SPARSE) {
       MaybeReorderSchurComplementColumnsUsingEigen(
-          size_of_first_elimination_group,
-          parameter_map,
-          program);
+          size_of_first_elimination_group, parameter_map, program);
     }
   }
 
@@ -556,9 +536,8 @@
         parameter_block_ordering,
         &ordering[0]);
   } else if (sparse_linear_algebra_library_type == CX_SPARSE) {
-    OrderingForSparseNormalCholeskyUsingCXSparse(
-        *tsm_block_jacobian_transpose,
-        &ordering[0]);
+    OrderingForSparseNormalCholeskyUsingCXSparse(*tsm_block_jacobian_transpose,
+                                                 &ordering[0]);
   } else if (sparse_linear_algebra_library_type == ACCELERATE_SPARSE) {
     // Accelerate does not provide a function to perform reordering without
     // performing a full symbolic factorisation.  As such, we have nothing
@@ -570,8 +549,7 @@
 
   } else if (sparse_linear_algebra_library_type == EIGEN_SPARSE) {
     OrderingForSparseNormalCholeskyUsingEigenSparse(
-        *tsm_block_jacobian_transpose,
-        &ordering[0]);
+        *tsm_block_jacobian_transpose, &ordering[0]);
   }
 
   // Apply ordering.
@@ -588,11 +566,11 @@
     const std::unordered_set<ResidualBlockId>& bottom_residual_blocks,
     Program* program) {
   auto residual_blocks = program->mutable_residual_blocks();
-  auto it = std::partition(
-      residual_blocks->begin(), residual_blocks->end(),
-      [&bottom_residual_blocks](ResidualBlock* r) {
-        return bottom_residual_blocks.count(r) == 0;
-      });
+  auto it = std::partition(residual_blocks->begin(),
+                           residual_blocks->end(),
+                           [&bottom_residual_blocks](ResidualBlock* r) {
+                             return bottom_residual_blocks.count(r) == 0;
+                           });
   return it - residual_blocks->begin();
 }
 
diff --git a/internal/ceres/reorder_program.h b/internal/ceres/reorder_program.h
index 88cbee3..e32079c 100644
--- a/internal/ceres/reorder_program.h
+++ b/internal/ceres/reorder_program.h
@@ -32,6 +32,7 @@
 #define CERES_INTERNAL_REORDER_PROGRAM_H_
 
 #include <string>
+
 #include "ceres/internal/port.h"
 #include "ceres/parameter_block_ordering.h"
 #include "ceres/problem_impl.h"
diff --git a/internal/ceres/reorder_program_test.cc b/internal/ceres/reorder_program_test.cc
index 2cfc123..83c867a 100644
--- a/internal/ceres/reorder_program_test.cc
+++ b/internal/ceres/reorder_program_test.cc
@@ -31,6 +31,7 @@
 #include "ceres/reorder_program.h"
 
 #include <random>
+
 #include "ceres/parameter_block.h"
 #include "ceres/problem_impl.h"
 #include "ceres/program.h"
@@ -107,9 +108,7 @@
 
   std::string message;
   EXPECT_TRUE(LexicographicallyOrderResidualBlocks(
-                  2,
-                  problem.mutable_program(),
-                  &message));
+      2, problem.mutable_program(), &message));
   EXPECT_EQ(residual_blocks.size(), expected_residual_blocks.size());
   for (int i = 0; i < expected_residual_blocks.size(); ++i) {
     EXPECT_EQ(residual_blocks[i], expected_residual_blocks[i]);
@@ -132,10 +131,8 @@
 
   Program program(problem.program());
   std::string message;
-  EXPECT_FALSE(ApplyOrdering(problem.parameter_map(),
-                             linear_solver_ordering,
-                             &program,
-                             &message));
+  EXPECT_FALSE(ApplyOrdering(
+      problem.parameter_map(), linear_solver_ordering, &program, &message));
 }
 
 TEST(_, ApplyOrderingNormal) {
@@ -156,10 +153,8 @@
   Program* program = problem.mutable_program();
   std::string message;
 
-  EXPECT_TRUE(ApplyOrdering(problem.parameter_map(),
-                            linear_solver_ordering,
-                            program,
-                            &message));
+  EXPECT_TRUE(ApplyOrdering(
+      problem.parameter_map(), linear_solver_ordering, program, &message));
   const vector<ParameterBlock*>& parameter_blocks = program->parameter_blocks();
 
   EXPECT_EQ(parameter_blocks.size(), 3);
@@ -169,8 +164,8 @@
 }
 
 #ifndef CERES_NO_SUITESPARSE
-class ReorderProgramFoSparseCholeskyUsingSuiteSparseTest :
-      public ::testing::Test {
+class ReorderProgramFoSparseCholeskyUsingSuiteSparseTest
+    : public ::testing::Test {
  protected:
   void SetUp() {
     problem_.AddResidualBlock(new UnaryCostFunction(), nullptr, &x_);
@@ -188,12 +183,11 @@
         program->parameter_blocks();
 
     std::string error;
-    EXPECT_TRUE(ReorderProgramForSparseCholesky(
-                    ceres::SUITE_SPARSE,
-                    linear_solver_ordering,
-                    0, /* use all rows */
-                    program,
-                    &error));
+    EXPECT_TRUE(ReorderProgramForSparseCholesky(ceres::SUITE_SPARSE,
+                                                linear_solver_ordering,
+                                                0, /* use all rows */
+                                                program,
+                                                &error));
     const vector<ParameterBlock*>& ordered_parameter_blocks =
         program->parameter_blocks();
     EXPECT_EQ(ordered_parameter_blocks.size(),
@@ -219,8 +213,7 @@
   ComputeAndValidateOrdering(linear_solver_ordering);
 }
 
-TEST_F(ReorderProgramFoSparseCholeskyUsingSuiteSparseTest,
-       ContiguousGroups) {
+TEST_F(ReorderProgramFoSparseCholeskyUsingSuiteSparseTest, ContiguousGroups) {
   ParameterBlockOrdering linear_solver_ordering;
   linear_solver_ordering.AddElementToGroup(&x_, 0);
   linear_solver_ordering.AddElementToGroup(&y_, 1);
@@ -229,8 +222,7 @@
   ComputeAndValidateOrdering(linear_solver_ordering);
 }
 
-TEST_F(ReorderProgramFoSparseCholeskyUsingSuiteSparseTest,
-       GroupsWithGaps) {
+TEST_F(ReorderProgramFoSparseCholeskyUsingSuiteSparseTest, GroupsWithGaps) {
   ParameterBlockOrdering linear_solver_ordering;
   linear_solver_ordering.AddElementToGroup(&x_, 0);
   linear_solver_ordering.AddElementToGroup(&y_, 2);
diff --git a/internal/ceres/residual_block.cc b/internal/ceres/residual_block.cc
index 0bf30bc..067c9ef 100644
--- a/internal/ceres/residual_block.cc
+++ b/internal/ceres/residual_block.cc
@@ -34,6 +34,7 @@
 #include <algorithm>
 #include <cstddef>
 #include <vector>
+
 #include "ceres/corrector.h"
 #include "ceres/cost_function.h"
 #include "ceres/internal/eigen.h"
@@ -50,8 +51,10 @@
 namespace internal {
 
 ResidualBlock::ResidualBlock(
-    const CostFunction* cost_function, const LossFunction* loss_function,
-    const std::vector<ParameterBlock*>& parameter_blocks, int index)
+    const CostFunction* cost_function,
+    const LossFunction* loss_function,
+    const std::vector<ParameterBlock*>& parameter_blocks,
+    int index)
     : cost_function_(cost_function),
       loss_function_(loss_function),
       parameter_blocks_(
@@ -111,22 +114,18 @@
     return false;
   }
 
-  if (!IsEvaluationValid(*this,
-                         parameters.data(),
-                         cost,
-                         residuals,
-                         eval_jacobians)) {
+  if (!IsEvaluationValid(
+          *this, parameters.data(), cost, residuals, eval_jacobians)) {
+    // clang-format off
     std::string message =
         "\n\n"
         "Error in evaluating the ResidualBlock.\n\n"
         "There are two possible reasons. Either the CostFunction did not evaluate and fill all    \n"  // NOLINT
         "residual and jacobians that were requested or there was a non-finite value (nan/infinite)\n"  // NOLINT
         "generated during the or jacobian computation. \n\n" +
-        EvaluationToString(*this,
-                           parameters.data(),
-                           cost,
-                           residuals,
-                           eval_jacobians);
+        EvaluationToString(
+            *this, parameters.data(), cost, residuals, eval_jacobians);
+    // clang-format on
     LOG(WARNING) << message;
     return false;
   }
@@ -149,7 +148,11 @@
               parameter_block->LocalParameterizationJacobian(),
               parameter_block->Size(),
               parameter_block->LocalSize(),
-              jacobians[i], 0, 0,  num_residuals, parameter_block->LocalSize());
+              jacobians[i],
+              0,
+              0,
+              num_residuals,
+              parameter_block->LocalSize());
         }
       }
     }
diff --git a/internal/ceres/residual_block.h b/internal/ceres/residual_block.h
index a2e4425..3d75339 100644
--- a/internal/ceres/residual_block.h
+++ b/internal/ceres/residual_block.h
@@ -105,7 +105,6 @@
                 double** jacobians,
                 double* scratch) const;
 
-
   const CostFunction* cost_function() const { return cost_function_; }
   const LossFunction* loss_function() const { return loss_function_; }
 
diff --git a/internal/ceres/residual_block_test.cc b/internal/ceres/residual_block_test.cc
index 482e7ce..3c05f48 100644
--- a/internal/ceres/residual_block_test.cc
+++ b/internal/ceres/residual_block_test.cc
@@ -31,11 +31,12 @@
 #include "ceres/residual_block.h"
 
 #include <cstdint>
-#include "gtest/gtest.h"
-#include "ceres/parameter_block.h"
-#include "ceres/sized_cost_function.h"
+
 #include "ceres/internal/eigen.h"
 #include "ceres/local_parameterization.h"
+#include "ceres/parameter_block.h"
+#include "ceres/sized_cost_function.h"
+#include "gtest/gtest.h"
 
 namespace ceres {
 namespace internal {
@@ -43,7 +44,7 @@
 using std::vector;
 
 // Trivial cost function that accepts three arguments.
-class TernaryCostFunction: public CostFunction {
+class TernaryCostFunction : public CostFunction {
  public:
   TernaryCostFunction(int num_residuals,
                       int32_t parameter_block1_size,
@@ -64,9 +65,8 @@
     if (jacobians) {
       for (int k = 0; k < 3; ++k) {
         if (jacobians[k] != NULL) {
-          MatrixRef jacobian(jacobians[k],
-                             num_residuals(),
-                             parameter_block_sizes()[k]);
+          MatrixRef jacobian(
+              jacobians[k], num_residuals(), parameter_block_sizes()[k]);
           jacobian.setConstant(k);
         }
       }
@@ -109,12 +109,12 @@
   // Verify cost-only evaluation.
   double cost;
   residual_block.Evaluate(true, &cost, NULL, NULL, scratch);
-  EXPECT_EQ(0.5 * (0*0 + 1*1 + 2*2), cost);
+  EXPECT_EQ(0.5 * (0 * 0 + 1 * 1 + 2 * 2), cost);
 
   // Verify cost and residual evaluation.
   double residuals[3];
   residual_block.Evaluate(true, &cost, residuals, NULL, scratch);
-  EXPECT_EQ(0.5 * (0*0 + 1*1 + 2*2), cost);
+  EXPECT_EQ(0.5 * (0 * 0 + 1 * 1 + 2 * 2), cost);
   EXPECT_EQ(0.0, residuals[0]);
   EXPECT_EQ(1.0, residuals[1]);
   EXPECT_EQ(2.0, residuals[2]);
@@ -131,14 +131,11 @@
   jacobian_ry.setConstant(-1.0);
   jacobian_rz.setConstant(-1.0);
 
-  double *jacobian_ptrs[3] = {
-    jacobian_rx.data(),
-    jacobian_ry.data(),
-    jacobian_rz.data()
-  };
+  double* jacobian_ptrs[3] = {
+      jacobian_rx.data(), jacobian_ry.data(), jacobian_rz.data()};
 
   residual_block.Evaluate(true, &cost, residuals, jacobian_ptrs, scratch);
-  EXPECT_EQ(0.5 * (0*0 + 1*1 + 2*2), cost);
+  EXPECT_EQ(0.5 * (0 * 0 + 1 * 1 + 2 * 2), cost);
   EXPECT_EQ(0.0, residuals[0]);
   EXPECT_EQ(1.0, residuals[1]);
   EXPECT_EQ(2.0, residuals[2]);
@@ -157,18 +154,20 @@
   jacobian_ptrs[1] = NULL;  // Don't compute the jacobian for y.
 
   residual_block.Evaluate(true, &cost, residuals, jacobian_ptrs, scratch);
-  EXPECT_EQ(0.5 * (0*0 + 1*1 + 2*2), cost);
+  EXPECT_EQ(0.5 * (0 * 0 + 1 * 1 + 2 * 2), cost);
   EXPECT_EQ(0.0, residuals[0]);
   EXPECT_EQ(1.0, residuals[1]);
   EXPECT_EQ(2.0, residuals[2]);
 
+  // clang-format off
   EXPECT_TRUE((jacobian_rx.array() ==  0.0).all()) << "\n" << jacobian_rx;
   EXPECT_TRUE((jacobian_ry.array() == -1.0).all()) << "\n" << jacobian_ry;
   EXPECT_TRUE((jacobian_rz.array() ==  2.0).all()) << "\n" << jacobian_rz;
+  // clang-format on
 }
 
 // Trivial cost function that accepts three arguments.
-class LocallyParameterizedCostFunction: public SizedCostFunction<3, 2, 3, 4> {
+class LocallyParameterizedCostFunction : public SizedCostFunction<3, 2, 3, 4> {
  public:
   bool Evaluate(double const* const* parameters,
                 double* residuals,
@@ -189,9 +188,8 @@
         //   0 1 2 3 4 ...
         //
         if (jacobians[k] != NULL) {
-          MatrixRef jacobian(jacobians[k],
-                             num_residuals(),
-                             parameter_block_sizes()[k]);
+          MatrixRef jacobian(
+              jacobians[k], num_residuals(), parameter_block_sizes()[k]);
           for (int j = 0; j < k + 2; ++j) {
             jacobian.col(j).setConstant(j);
           }
@@ -243,17 +241,17 @@
   EXPECT_EQ(parameters[0], residual_block.parameter_blocks()[0]);
   EXPECT_EQ(parameters[1], residual_block.parameter_blocks()[1]);
   EXPECT_EQ(parameters[2], residual_block.parameter_blocks()[2]);
-  EXPECT_EQ(3*(2 + 4) + 3, residual_block.NumScratchDoublesForEvaluate());
+  EXPECT_EQ(3 * (2 + 4) + 3, residual_block.NumScratchDoublesForEvaluate());
 
   // Verify cost-only evaluation.
   double cost;
   residual_block.Evaluate(true, &cost, NULL, NULL, scratch);
-  EXPECT_EQ(0.5 * (0*0 + 1*1 + 2*2), cost);
+  EXPECT_EQ(0.5 * (0 * 0 + 1 * 1 + 2 * 2), cost);
 
   // Verify cost and residual evaluation.
   double residuals[3];
   residual_block.Evaluate(true, &cost, residuals, NULL, scratch);
-  EXPECT_EQ(0.5 * (0*0 + 1*1 + 2*2), cost);
+  EXPECT_EQ(0.5 * (0 * 0 + 1 * 1 + 2 * 2), cost);
   EXPECT_EQ(0.0, residuals[0]);
   EXPECT_EQ(1.0, residuals[1]);
   EXPECT_EQ(2.0, residuals[2]);
@@ -270,18 +268,17 @@
   jacobian_ry.setConstant(-1.0);
   jacobian_rz.setConstant(-1.0);
 
-  double *jacobian_ptrs[3] = {
-    jacobian_rx.data(),
-    jacobian_ry.data(),
-    jacobian_rz.data()
-  };
+  double* jacobian_ptrs[3] = {
+      jacobian_rx.data(), jacobian_ry.data(), jacobian_rz.data()};
 
   residual_block.Evaluate(true, &cost, residuals, jacobian_ptrs, scratch);
-  EXPECT_EQ(0.5 * (0*0 + 1*1 + 2*2), cost);
+  EXPECT_EQ(0.5 * (0 * 0 + 1 * 1 + 2 * 2), cost);
   EXPECT_EQ(0.0, residuals[0]);
   EXPECT_EQ(1.0, residuals[1]);
   EXPECT_EQ(2.0, residuals[2]);
 
+  // clang-format off
+
   Matrix expected_jacobian_rx(3, 1);
   expected_jacobian_rx << 1.0, 1.0, 1.0;
 
@@ -305,6 +302,8 @@
       << "\nExpected:\n " << expected_jacobian_rz
       << "\nActual:\n"   << jacobian_rz;
 
+  // clang-format on
+
   // Verify cost, residual, and partial jacobian evaluation.
   cost = 0.0;
   VectorRef(residuals, 3).setConstant(0.0);
@@ -315,7 +314,7 @@
   jacobian_ptrs[1] = NULL;  // Don't compute the jacobian for y.
 
   residual_block.Evaluate(true, &cost, residuals, jacobian_ptrs, scratch);
-  EXPECT_EQ(0.5 * (0*0 + 1*1 + 2*2), cost);
+  EXPECT_EQ(0.5 * (0 * 0 + 1 * 1 + 2 * 2), cost);
   EXPECT_EQ(0.0, residuals[0]);
   EXPECT_EQ(1.0, residuals[1]);
   EXPECT_EQ(2.0, residuals[2]);
diff --git a/internal/ceres/residual_block_utils.cc b/internal/ceres/residual_block_utils.cc
index 35e928b..d5b3fa1 100644
--- a/internal/ceres/residual_block_utils.cc
+++ b/internal/ceres/residual_block_utils.cc
@@ -33,6 +33,7 @@
 #include <cmath>
 #include <cstddef>
 #include <limits>
+
 #include "ceres/array_utils.h"
 #include "ceres/internal/eigen.h"
 #include "ceres/internal/port.h"
@@ -75,6 +76,7 @@
   const int num_residuals = block.NumResiduals();
   string result = "";
 
+  // clang-format off
   StringAppendF(&result,
                 "Residual Block size: %d parameter blocks x %d residuals\n\n",
                 num_parameter_blocks, num_residuals);
@@ -85,6 +87,7 @@
       "of the Jacobian/residual array was requested but was not written to by user code, it is \n"  // NOLINT
       "indicated by 'Uninitialized'. This is an error. Residuals or Jacobian values evaluating \n"  // NOLINT
       "to Inf or NaN is also an error.  \n\n"; // NOLINT
+  // clang-format on
 
   string space = "Residuals:     ";
   result += space;
@@ -102,8 +105,8 @@
       for (int k = 0; k < num_residuals; ++k) {
         AppendArrayToString(1,
                             (jacobians != NULL && jacobians[i] != NULL)
-                            ? jacobians[i] + k * parameter_block_size + j
-                            : NULL,
+                                ? jacobians[i] + k * parameter_block_size + j
+                                : NULL,
                             &result);
       }
       StringAppendF(&result, "\n");
diff --git a/internal/ceres/residual_block_utils.h b/internal/ceres/residual_block_utils.h
index 627337f..41ae81a 100644
--- a/internal/ceres/residual_block_utils.h
+++ b/internal/ceres/residual_block_utils.h
@@ -44,6 +44,7 @@
 #define CERES_INTERNAL_RESIDUAL_BLOCK_UTILS_H_
 
 #include <string>
+
 #include "ceres/internal/port.h"
 
 namespace ceres {
diff --git a/internal/ceres/residual_block_utils_test.cc b/internal/ceres/residual_block_utils_test.cc
index 6ad3729..331f5ab 100644
--- a/internal/ceres/residual_block_utils_test.cc
+++ b/internal/ceres/residual_block_utils_test.cc
@@ -28,15 +28,17 @@
 //
 // Author: sameeragarwal@google.com (Sameer Agarwal)
 
+#include "ceres/residual_block_utils.h"
+
 #include <cmath>
 #include <limits>
 #include <memory>
-#include "gtest/gtest.h"
+
+#include "ceres/cost_function.h"
 #include "ceres/parameter_block.h"
 #include "ceres/residual_block.h"
-#include "ceres/residual_block_utils.h"
-#include "ceres/cost_function.h"
 #include "ceres/sized_cost_function.h"
+#include "gtest/gtest.h"
 
 namespace ceres {
 namespace internal {
@@ -49,10 +51,7 @@
   std::vector<ParameterBlock*> parameter_blocks;
   parameter_blocks.push_back(&parameter_block);
 
-  ResidualBlock residual_block(&cost_function,
-                               NULL,
-                               parameter_blocks,
-                               -1);
+  ResidualBlock residual_block(&cost_function, NULL, parameter_blocks, -1);
 
   std::unique_ptr<double[]> scratch(
       new double[residual_block.NumScratchDoublesForEvaluate()]);
@@ -60,18 +59,16 @@
   double cost;
   double residuals;
   double jacobian;
-  double* jacobians[] = { &jacobian };
+  double* jacobians[] = {&jacobian};
 
-  EXPECT_EQ(residual_block.Evaluate(true,
-                                    &cost,
-                                    &residuals,
-                                    jacobians,
-                                    scratch.get()), is_good);
+  EXPECT_EQ(residual_block.Evaluate(
+                true, &cost, &residuals, jacobians, scratch.get()),
+            is_good);
 }
 
 // A CostFunction that behaves normaly, i.e., it computes numerically
 // valid residuals and jacobians.
-class GoodCostFunction: public SizedCostFunction<1, 1> {
+class GoodCostFunction : public SizedCostFunction<1, 1> {
  public:
   bool Evaluate(double const* const* parameters,
                 double* residuals,
@@ -86,7 +83,7 @@
 
 // The following four CostFunctions simulate the different ways in
 // which user code can cause ResidualBlock::Evaluate to fail.
-class NoResidualUpdateCostFunction: public SizedCostFunction<1, 1> {
+class NoResidualUpdateCostFunction : public SizedCostFunction<1, 1> {
  public:
   bool Evaluate(double const* const* parameters,
                 double* residuals,
@@ -100,7 +97,7 @@
   }
 };
 
-class NoJacobianUpdateCostFunction: public SizedCostFunction<1, 1> {
+class NoJacobianUpdateCostFunction : public SizedCostFunction<1, 1> {
  public:
   bool Evaluate(double const* const* parameters,
                 double* residuals,
@@ -114,7 +111,7 @@
   }
 };
 
-class BadResidualCostFunction: public SizedCostFunction<1, 1> {
+class BadResidualCostFunction : public SizedCostFunction<1, 1> {
  public:
   bool Evaluate(double const* const* parameters,
                 double* residuals,
@@ -127,7 +124,7 @@
   }
 };
 
-class BadJacobianCostFunction: public SizedCostFunction<1, 1> {
+class BadJacobianCostFunction : public SizedCostFunction<1, 1> {
  public:
   bool Evaluate(double const* const* parameters,
                 double* residuals,
diff --git a/internal/ceres/rotation_test.cc b/internal/ceres/rotation_test.cc
index 6a06632..fc39b31 100644
--- a/internal/ceres/rotation_test.cc
+++ b/internal/ceres/rotation_test.cc
@@ -28,14 +28,16 @@
 //
 // Author: sameeragarwal@google.com (Sameer Agarwal)
 
+#include "ceres/rotation.h"
+
 #include <cmath>
 #include <limits>
 #include <string>
+
 #include "ceres/internal/eigen.h"
-#include "ceres/is_close.h"
 #include "ceres/internal/port.h"
+#include "ceres/is_close.h"
 #include "ceres/jet.h"
-#include "ceres/rotation.h"
 #include "ceres/stringprintf.h"
 #include "ceres/test_util.h"
 #include "glog/logging.h"
@@ -45,8 +47,8 @@
 namespace ceres {
 namespace internal {
 
-using std::min;
 using std::max;
+using std::min;
 using std::numeric_limits;
 using std::string;
 using std::swap;
@@ -74,8 +76,8 @@
     return false;
   }
 
-  double norm2 = arg[0] * arg[0] + arg[1] * arg[1] +
-      arg[2] * arg[2] + arg[3] * arg[3];
+  double norm2 =
+      arg[0] * arg[0] + arg[1] * arg[1] + arg[2] * arg[2] + arg[3] * arg[3];
   if (fabs(norm2 - 1.0) > kTolerance) {
     *result_listener << "squared norm is " << norm2;
     return false;
@@ -120,6 +122,7 @@
     return true;
   }
 
+  // clang-format off
   *result_listener << "expected : "
                    << expected[0] << " "
                    << expected[1] << " "
@@ -130,6 +133,7 @@
                    << arg[1] << " "
                    << arg[2] << " "
                    << arg[3];
+  // clang-format on
   return false;
 }
 
@@ -164,6 +168,7 @@
     return true;
   }
 
+  // clang-format off
   *result_listener << " arg:"
                    << " " << arg[0]
                    << " " << arg[1]
@@ -172,6 +177,7 @@
                    << " " << expected[0]
                    << " " << expected[1]
                    << " " << expected[2];
+  // clang-format on
   return false;
 }
 
@@ -225,9 +231,9 @@
 
 // Transforms a zero axis/angle to a quaternion.
 TEST(Rotation, ZeroAngleAxisToQuaternion) {
-  double axis_angle[3] = { 0, 0, 0 };
+  double axis_angle[3] = {0, 0, 0};
   double quaternion[4];
-  double expected[4] = { 1, 0, 0, 0 };
+  double expected[4] = {1, 0, 0, 0};
   AngleAxisToQuaternion(axis_angle, quaternion);
   EXPECT_THAT(quaternion, IsNormalizedQuaternion());
   EXPECT_THAT(quaternion, IsNearQuaternion(expected));
@@ -237,9 +243,9 @@
 TEST(Rotation, SmallAngleAxisToQuaternion) {
   // Small, finite value to test.
   double theta = 1.0e-2;
-  double axis_angle[3] = { theta, 0, 0 };
+  double axis_angle[3] = {theta, 0, 0};
   double quaternion[4];
-  double expected[4] = { cos(theta/2), sin(theta/2.0), 0, 0 };
+  double expected[4] = {cos(theta / 2), sin(theta / 2.0), 0, 0};
   AngleAxisToQuaternion(axis_angle, quaternion);
   EXPECT_THAT(quaternion, IsNormalizedQuaternion());
   EXPECT_THAT(quaternion, IsNearQuaternion(expected));
@@ -249,9 +255,9 @@
 TEST(Rotation, TinyAngleAxisToQuaternion) {
   // Very small value that could potentially cause underflow.
   double theta = pow(numeric_limits<double>::min(), 0.75);
-  double axis_angle[3] = { theta, 0, 0 };
+  double axis_angle[3] = {theta, 0, 0};
   double quaternion[4];
-  double expected[4] = { cos(theta/2), sin(theta/2.0), 0, 0 };
+  double expected[4] = {cos(theta / 2), sin(theta / 2.0), 0, 0};
   AngleAxisToQuaternion(axis_angle, quaternion);
   EXPECT_THAT(quaternion, IsNormalizedQuaternion());
   EXPECT_THAT(quaternion, IsNearQuaternion(expected));
@@ -259,9 +265,9 @@
 
 // Transforms a rotation by pi/2 around X to a quaternion.
 TEST(Rotation, XRotationToQuaternion) {
-  double axis_angle[3] = { kPi / 2, 0, 0 };
+  double axis_angle[3] = {kPi / 2, 0, 0};
   double quaternion[4];
-  double expected[4] = { kHalfSqrt2, kHalfSqrt2, 0, 0 };
+  double expected[4] = {kHalfSqrt2, kHalfSqrt2, 0, 0};
   AngleAxisToQuaternion(axis_angle, quaternion);
   EXPECT_THAT(quaternion, IsNormalizedQuaternion());
   EXPECT_THAT(quaternion, IsNearQuaternion(expected));
@@ -269,18 +275,18 @@
 
 // Transforms a unit quaternion to an axis angle.
 TEST(Rotation, UnitQuaternionToAngleAxis) {
-  double quaternion[4] = { 1, 0, 0, 0 };
+  double quaternion[4] = {1, 0, 0, 0};
   double axis_angle[3];
-  double expected[3] = { 0, 0, 0 };
+  double expected[3] = {0, 0, 0};
   QuaternionToAngleAxis(quaternion, axis_angle);
   EXPECT_THAT(axis_angle, IsNearAngleAxis(expected));
 }
 
 // Transforms a quaternion that rotates by pi about the Y axis to an axis angle.
 TEST(Rotation, YRotationQuaternionToAngleAxis) {
-  double quaternion[4] = { 0, 0, 1, 0 };
+  double quaternion[4] = {0, 0, 1, 0};
   double axis_angle[3];
-  double expected[3] = { 0, kPi, 0 };
+  double expected[3] = {0, kPi, 0};
   QuaternionToAngleAxis(quaternion, axis_angle);
   EXPECT_THAT(axis_angle, IsNearAngleAxis(expected));
 }
@@ -288,9 +294,9 @@
 // Transforms a quaternion that rotates by pi/3 about the Z axis to an axis
 // angle.
 TEST(Rotation, ZRotationQuaternionToAngleAxis) {
-  double quaternion[4] = { sqrt(3) / 2, 0, 0, 0.5 };
+  double quaternion[4] = {sqrt(3) / 2, 0, 0, 0.5};
   double axis_angle[3];
-  double expected[3] = { 0, 0, kPi / 3 };
+  double expected[3] = {0, 0, kPi / 3};
   QuaternionToAngleAxis(quaternion, axis_angle);
   EXPECT_THAT(axis_angle, IsNearAngleAxis(expected));
 }
@@ -299,9 +305,9 @@
 TEST(Rotation, SmallQuaternionToAngleAxis) {
   // Small, finite value to test.
   double theta = 1.0e-2;
-  double quaternion[4] = { cos(theta/2), sin(theta/2.0), 0, 0 };
+  double quaternion[4] = {cos(theta / 2), sin(theta / 2.0), 0, 0};
   double axis_angle[3];
-  double expected[3] = { theta, 0, 0 };
+  double expected[3] = {theta, 0, 0};
   QuaternionToAngleAxis(quaternion, axis_angle);
   EXPECT_THAT(axis_angle, IsNearAngleAxis(expected));
 }
@@ -310,9 +316,9 @@
 TEST(Rotation, TinyQuaternionToAngleAxis) {
   // Very small value that could potentially cause underflow.
   double theta = pow(numeric_limits<double>::min(), 0.75);
-  double quaternion[4] = { cos(theta/2), sin(theta/2.0), 0, 0 };
+  double quaternion[4] = {cos(theta / 2), sin(theta / 2.0), 0, 0};
   double axis_angle[3];
-  double expected[3] = { theta, 0, 0 };
+  double expected[3] = {theta, 0, 0};
   QuaternionToAngleAxis(quaternion, axis_angle);
   EXPECT_THAT(axis_angle, IsNearAngleAxis(expected));
 }
@@ -328,9 +334,9 @@
   quaternion[2] = 0.0;
   quaternion[3] = 0.0;
   QuaternionToAngleAxis(quaternion, angle_axis);
-  const double angle = sqrt(angle_axis[0] * angle_axis[0] +
-                            angle_axis[1] * angle_axis[1] +
-                            angle_axis[2] * angle_axis[2]);
+  const double angle =
+      sqrt(angle_axis[0] * angle_axis[0] + angle_axis[1] * angle_axis[1] +
+           angle_axis[2] * angle_axis[2]);
   EXPECT_LE(angle, kPi);
 }
 
@@ -398,18 +404,18 @@
 
 // Transforms a zero axis/angle to a rotation matrix.
 TEST(Rotation, ZeroAngleAxisToRotationMatrix) {
-  double axis_angle[3] = { 0, 0, 0 };
+  double axis_angle[3] = {0, 0, 0};
   double matrix[9];
-  double expected[9] = { 1, 0, 0, 0, 1, 0, 0, 0, 1 };
+  double expected[9] = {1, 0, 0, 0, 1, 0, 0, 0, 1};
   AngleAxisToRotationMatrix(axis_angle, matrix);
   EXPECT_THAT(matrix, IsOrthonormal());
   EXPECT_THAT(matrix, IsNear3x3Matrix(expected));
 }
 
 TEST(Rotation, NearZeroAngleAxisToRotationMatrix) {
-  double axis_angle[3] = { 1e-24, 2e-24, 3e-24 };
+  double axis_angle[3] = {1e-24, 2e-24, 3e-24};
   double matrix[9];
-  double expected[9] = { 1, 0, 0, 0, 1, 0, 0, 0, 1 };
+  double expected[9] = {1, 0, 0, 0, 1, 0, 0, 0, 1};
   AngleAxisToRotationMatrix(axis_angle, matrix);
   EXPECT_THAT(matrix, IsOrthonormal());
   EXPECT_THAT(matrix, IsNear3x3Matrix(expected));
@@ -417,10 +423,10 @@
 
 // Transforms a rotation by pi/2 around X to a rotation matrix and back.
 TEST(Rotation, XRotationToRotationMatrix) {
-  double axis_angle[3] = { kPi / 2, 0, 0 };
+  double axis_angle[3] = {kPi / 2, 0, 0};
   double matrix[9];
   // The rotation matrices are stored column-major.
-  double expected[9] = { 1, 0, 0, 0, 0, 1, 0, -1, 0 };
+  double expected[9] = {1, 0, 0, 0, 0, 1, 0, -1, 0};
   AngleAxisToRotationMatrix(axis_angle, matrix);
   EXPECT_THAT(matrix, IsOrthonormal());
   EXPECT_THAT(matrix, IsNear3x3Matrix(expected));
@@ -432,9 +438,9 @@
 // Transforms an axis angle that rotates by pi about the Y axis to a
 // rotation matrix and back.
 TEST(Rotation, YRotationToRotationMatrix) {
-  double axis_angle[3] = { 0, kPi, 0 };
+  double axis_angle[3] = {0, kPi, 0};
   double matrix[9];
-  double expected[9] = { -1, 0, 0, 0, 1, 0, 0, 0, -1 };
+  double expected[9] = {-1, 0, 0, 0, 1, 0, 0, 0, -1};
   AngleAxisToRotationMatrix(axis_angle, matrix);
   EXPECT_THAT(matrix, IsOrthonormal());
   EXPECT_THAT(matrix, IsNear3x3Matrix(expected));
@@ -475,29 +481,31 @@
 
 TEST(Rotation, AtPiAngleAxisRoundTrip) {
   // A rotation of kPi about the X axis;
+  // clang-format off
   static constexpr double kMatrix[3][3] = {
     {1.0,  0.0,  0.0},
     {0.0,  -1.0,  0.0},
     {0.0,  0.0,  -1.0}
   };
+  // clang-format on
 
   double in_matrix[9];
   // Fill it from kMatrix in col-major order.
   for (int j = 0, k = 0; j < 3; ++j) {
-     for (int i = 0; i < 3; ++i, ++k) {
-       in_matrix[k] = kMatrix[i][j];
-     }
+    for (int i = 0; i < 3; ++i, ++k) {
+      in_matrix[k] = kMatrix[i][j];
+    }
   }
 
-  const double expected_axis_angle[3] = { kPi, 0, 0 };
+  const double expected_axis_angle[3] = {kPi, 0, 0};
 
   double out_matrix[9];
   double axis_angle[3];
   RotationMatrixToAngleAxis(in_matrix, axis_angle);
   AngleAxisToRotationMatrix(axis_angle, out_matrix);
 
-  LOG(INFO) << "AngleAxis = " << axis_angle[0] << " " << axis_angle[1]
-            << " " << axis_angle[2];
+  LOG(INFO) << "AngleAxis = " << axis_angle[0] << " " << axis_angle[1] << " "
+            << axis_angle[2];
   LOG(INFO) << "Expected AngleAxis = " << kPi << " 0 0";
   double out_rowmajor[3][3];
   for (int j = 0, k = 0; j < 3; ++j) {
@@ -526,13 +534,15 @@
 // Transforms an axis angle that rotates by pi/3 about the Z axis to a
 // rotation matrix.
 TEST(Rotation, ZRotationToRotationMatrix) {
-  double axis_angle[3] =  { 0, 0, kPi / 3 };
+  double axis_angle[3] = {0, 0, kPi / 3};
   double matrix[9];
   // This is laid-out row-major on the screen but is actually stored
   // column-major.
+  // clang-format off
   double expected[9] = { 0.5, sqrt(3) / 2, 0,   // Column 1
                          -sqrt(3) / 2, 0.5, 0,  // Column 2
                          0, 0, 1 };             // Column 3
+  // clang-format on
   AngleAxisToRotationMatrix(axis_angle, matrix);
   EXPECT_THAT(matrix, IsOrthonormal());
   EXPECT_THAT(matrix, IsNear3x3Matrix(expected));
@@ -602,13 +612,12 @@
     RotationMatrixToAngleAxis(matrix, round_trip);
 
     for (int i = 0; i < 3; ++i) {
-      EXPECT_NEAR(round_trip[i], axis_angle[i],
-                  numeric_limits<double>::epsilon());
+      EXPECT_NEAR(
+          round_trip[i], axis_angle[i], numeric_limits<double>::epsilon());
     }
   }
 }
 
-
 // Transposes a 3x3 matrix.
 static void Transpose3x3(double m[9]) {
   swap(m[1], m[3]);
@@ -647,8 +656,7 @@
   for (double x = -1.0; x <= 1.0; x += 1.0) {
     for (double y = -1.0; y <= 1.0; y += 1.0) {
       for (double z = -1.0; z <= 1.0; z += 1.0) {
-        if ((x != 0) + (y != 0) + (z != 0) > 1)
-          continue;
+        if ((x != 0) + (y != 0) + (z != 0) > 1) continue;
         double axis_angle[3] = {x, y, z};
         double euler_angles[3] = {x, y, z};
         CompareEulerToAngleAxis(axis_angle, euler_angles);
@@ -710,7 +718,7 @@
 }  // namespace
 
 template <int N>
-bool IsClose(const Jet<double, N> &x, const Jet<double, N> &y) {
+bool IsClose(const Jet<double, N>& x, const Jet<double, N>& y) {
   if (!IsClose(x.a, y.a)) {
     return false;
   }
@@ -723,7 +731,7 @@
 }
 
 template <int M, int N>
-void ExpectJetArraysClose(const Jet<double, N> *x, const Jet<double, N> *y) {
+void ExpectJetArraysClose(const Jet<double, N>* x, const Jet<double, N>* y) {
   for (int i = 0; i < M; i++) {
     if (!IsClose(x[i], y[i])) {
       LOG(ERROR) << "Jet " << i << "/" << M << " not equal";
@@ -742,11 +750,11 @@
 
 // Log-10 of a value well below machine precision.
 static const int kSmallTinyCutoff =
-    static_cast<int>(2 * log(numeric_limits<double>::epsilon())/log(10.0));
+    static_cast<int>(2 * log(numeric_limits<double>::epsilon()) / log(10.0));
 
 // Log-10 of a value just below values representable by double.
-static const int kTinyZeroLimit   =
-    static_cast<int>(1 + log(numeric_limits<double>::min())/log(10.0));
+static const int kTinyZeroLimit =
+    static_cast<int>(1 + log(numeric_limits<double>::min()) / log(10.0));
 
 // Test that exact conversion works for small angles when jets are used.
 TEST(Rotation, SmallAngleAxisToQuaternionForJets) {
@@ -754,27 +762,26 @@
   // to be well within the range represented by doubles.
   for (int i = -2; i >= kSmallTinyCutoff; i--) {
     double theta = pow(10.0, i);
-    J3 axis_angle[3] = { J3(theta, 0), J3(0, 1), J3(0, 2) };
+    J3 axis_angle[3] = {J3(theta, 0), J3(0, 1), J3(0, 2)};
     J3 quaternion[4];
     J3 expected[4] = {
-        MakeJ3(cos(theta/2), -sin(theta/2)/2, 0, 0),
-        MakeJ3(sin(theta/2), cos(theta/2)/2, 0, 0),
-        MakeJ3(0, 0, sin(theta/2)/theta, 0),
-        MakeJ3(0, 0, 0, sin(theta/2)/theta),
+        MakeJ3(cos(theta / 2), -sin(theta / 2) / 2, 0, 0),
+        MakeJ3(sin(theta / 2), cos(theta / 2) / 2, 0, 0),
+        MakeJ3(0, 0, sin(theta / 2) / theta, 0),
+        MakeJ3(0, 0, 0, sin(theta / 2) / theta),
     };
     AngleAxisToQuaternion(axis_angle, quaternion);
     ExpectJetArraysClose<4, 3>(quaternion, expected);
   }
 }
 
-
 // Test that conversion works for very small angles when jets are used.
 TEST(Rotation, TinyAngleAxisToQuaternionForJets) {
   // Examine tiny x rotations that extend all the way to where
   // underflow occurs.
   for (int i = kSmallTinyCutoff; i >= kTinyZeroLimit; i--) {
     double theta = pow(10.0, i);
-    J3 axis_angle[3] = { J3(theta, 0), J3(0, 1), J3(0, 2) };
+    J3 axis_angle[3] = {J3(theta, 0), J3(0, 1), J3(0, 2)};
     J3 quaternion[4];
     // To avoid loss of precision in the test itself,
     // a finite expansion is used here, which will
@@ -792,7 +799,7 @@
 
 // Test that derivatives are correct for zero rotation.
 TEST(Rotation, ZeroAngleAxisToQuaternionForJets) {
-  J3 axis_angle[3] = { J3(0, 0), J3(0, 1), J3(0, 2) };
+  J3 axis_angle[3] = {J3(0, 0), J3(0, 1), J3(0, 2)};
   J3 quaternion[4];
   J3 expected[4] = {
       MakeJ3(1.0, 0, 0, 0),
@@ -812,13 +819,15 @@
     double theta = pow(10.0, i);
     double s = sin(theta);
     double c = cos(theta);
-    J4 quaternion[4] = { J4(c, 0), J4(s, 1), J4(0, 2), J4(0, 3) };
+    J4 quaternion[4] = {J4(c, 0), J4(s, 1), J4(0, 2), J4(0, 3)};
     J4 axis_angle[3];
+    // clang-format off
     J4 expected[3] = {
         MakeJ4(2*theta, -2*s, 2*c,  0,         0),
         MakeJ4(0,        0,   0,    2*theta/s, 0),
         MakeJ4(0,        0,   0,    0,         2*theta/s),
     };
+    // clang-format on
     QuaternionToAngleAxis(quaternion, axis_angle);
     ExpectJetArraysClose<3, 4>(axis_angle, expected);
   }
@@ -832,16 +841,18 @@
     double theta = pow(10.0, i);
     double s = sin(theta);
     double c = cos(theta);
-    J4 quaternion[4] = { J4(c, 0), J4(s, 1), J4(0, 2), J4(0, 3) };
+    J4 quaternion[4] = {J4(c, 0), J4(s, 1), J4(0, 2), J4(0, 3)};
     J4 axis_angle[3];
     // To avoid loss of precision in the test itself,
     // a finite expansion is used here, which will
     // be exact up to machine precision for the test values used.
+    // clang-format off
     J4 expected[3] = {
         MakeJ4(2*theta, -2*s, 2.0, 0,   0),
         MakeJ4(0,        0,   0,   2.0, 0),
         MakeJ4(0,        0,   0,   0,   2.0),
     };
+    // clang-format on
     QuaternionToAngleAxis(quaternion, axis_angle);
     ExpectJetArraysClose<3, 4>(axis_angle, expected);
   }
@@ -849,7 +860,7 @@
 
 // Test that conversion works for no rotation.
 TEST(Rotation, ZeroQuaternionToAngleAxisForJets) {
-  J4 quaternion[4] = { J4(1, 0), J4(0, 1), J4(0, 2), J4(0, 3) };
+  J4 quaternion[4] = {J4(1, 0), J4(0, 1), J4(0, 2), J4(0, 3)};
   J4 axis_angle[3];
   J4 expected[3] = {
       MakeJ4(0, 0, 2.0, 0, 0),
@@ -863,20 +874,22 @@
 TEST(Quaternion, RotatePointGivesSameAnswerAsRotationByMatrixCanned) {
   // Canned data generated in octave.
   double const q[4] = {
-    +0.1956830471754074,
-    -0.0150618562474847,
-    +0.7634572982788086,
-    -0.3019454777240753,
+      +0.1956830471754074,
+      -0.0150618562474847,
+      +0.7634572982788086,
+      -0.3019454777240753,
   };
-  double const Q[3][3] = {  // Scaled rotation matrix.
-    { -0.6355194033477252,  0.0951730541682254,  0.3078870197911186 },
-    { -0.1411693904792992,  0.5297609702153905, -0.4551502574482019 },
-    { -0.2896955822708862, -0.4669396571547050, -0.4536309793389248 },
+  double const Q[3][3] = {
+      // Scaled rotation matrix.
+      {-0.6355194033477252, +0.0951730541682254, +0.3078870197911186},
+      {-0.1411693904792992, +0.5297609702153905, -0.4551502574482019},
+      {-0.2896955822708862, -0.4669396571547050, -0.4536309793389248},
   };
-  double const R[3][3] = {  // With unit rows and columns.
-    { -0.8918859164053080,  0.1335655625725649,  0.4320876677394745 },
-    { -0.1981166751680096,  0.7434648665444399, -0.6387564287225856 },
-    { -0.4065578619806013, -0.6553016349046693, -0.6366242786393164 },
+  double const R[3][3] = {
+      // With unit rows and columns.
+      {-0.8918859164053080, +0.1335655625725649, +0.4320876677394745},
+      {-0.1981166751680096, +0.7434648665444399, -0.6387564287225856},
+      {-0.4065578619806013, -0.6553016349046693, -0.6366242786393164},
   };
 
   // Compute R from q and compare to known answer.
@@ -889,19 +902,18 @@
   ExpectArraysClose(9, R[0], Rq[0], kTolerance);
 }
 
-
 TEST(Quaternion, RotatePointGivesSameAnswerAsRotationByMatrix) {
   // Rotation defined by a unit quaternion.
   double const q[4] = {
-    0.2318160216097109,
-    -0.0178430356832060,
-    0.9044300776717159,
-    -0.3576998641394597,
+      +0.2318160216097109,
+      -0.0178430356832060,
+      +0.9044300776717159,
+      -0.3576998641394597,
   };
   double const p[3] = {
-    +0.11,
-    -13.15,
-    1.17,
+      +0.11,
+      -13.15,
+      1.17,
   };
 
   double R[3 * 3];
@@ -911,11 +923,10 @@
   UnitQuaternionRotatePoint(q, p, result1);
 
   double result2[3];
-  VectorRef(result2, 3) = ConstMatrixRef(R, 3, 3)* ConstVectorRef(p, 3);
+  VectorRef(result2, 3) = ConstMatrixRef(R, 3, 3) * ConstVectorRef(p, 3);
   ExpectArraysClose(3, result1, result2, kTolerance);
 }
 
-
 // Verify that (a * b) * c == a * (b * c).
 TEST(Quaternion, MultiplicationIsAssociative) {
   double a[4];
@@ -943,7 +954,6 @@
   ASSERT_NEAR(ab_c[3], a_bc[3], kTolerance);
 }
 
-
 TEST(AngleAxis, RotatePointGivesSameAnswerAsRotationMatrix) {
   double angle_axis[3];
   double R[9];
@@ -973,6 +983,7 @@
 
       AngleAxisRotatePoint(angle_axis, p, angle_axis_rotated_p);
       for (int k = 0; k < 3; ++k) {
+        // clang-format off
         EXPECT_NEAR(rotation_matrix_rotated_p[k],
                     angle_axis_rotated_p[k],
                     kTolerance) << "p: " << p[0]
@@ -981,6 +992,7 @@
                                 << " angle_axis: " << angle_axis[0]
                                 << " " << angle_axis[1]
                                 << " " << angle_axis[2];
+        // clang-format on
       }
     }
   }
@@ -1001,7 +1013,7 @@
       norm2 = angle_axis[k] * angle_axis[k];
     }
 
-    double theta = (2.0 * i * 0.0001  - 1.0) * 1e-16;
+    double theta = (2.0 * i * 0.0001 - 1.0) * 1e-16;
     const double inv_norm = theta / sqrt(norm2);
     for (int k = 0; k < 3; ++k) {
       angle_axis[k] *= inv_norm;
@@ -1014,6 +1026,7 @@
 
     AngleAxisRotatePoint(angle_axis, p, angle_axis_rotated_p);
     for (int k = 0; k < 3; ++k) {
+      // clang-format off
       EXPECT_NEAR(rotation_matrix_rotated_p[k],
                   angle_axis_rotated_p[k],
                   kTolerance) << "p: " << p[0]
@@ -1022,14 +1035,15 @@
                               << " angle_axis: " << angle_axis[0]
                               << " " << angle_axis[1]
                               << " " << angle_axis[2];
+      // clang-format on
     }
   }
 }
 
 TEST(MatrixAdapter, RowMajor3x3ReturnTypeAndAccessIsCorrect) {
-  double array[9] = { 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0 };
-  const float const_array[9] =
-      { 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f };
+  double array[9] = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
+  const float const_array[9] = {
+      1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f};
   MatrixAdapter<double, 3, 1> A = RowMajorAdapter3x3(array);
   MatrixAdapter<const float, 3, 1> B = RowMajorAdapter3x3(const_array);
 
@@ -1037,16 +1051,16 @@
     for (int j = 0; j < 3; ++j) {
       // The values are integers from 1 to 9, so equality tests are appropriate
       // even for float and double values.
-      EXPECT_EQ(A(i, j), array[3*i+j]);
-      EXPECT_EQ(B(i, j), const_array[3*i+j]);
+      EXPECT_EQ(A(i, j), array[3 * i + j]);
+      EXPECT_EQ(B(i, j), const_array[3 * i + j]);
     }
   }
 }
 
 TEST(MatrixAdapter, ColumnMajor3x3ReturnTypeAndAccessIsCorrect) {
-  double array[9] = { 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0 };
-  const float const_array[9] =
-      { 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f };
+  double array[9] = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
+  const float const_array[9] = {
+      1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f};
   MatrixAdapter<double, 1, 3> A = ColumnMajorAdapter3x3(array);
   MatrixAdapter<const float, 1, 3> B = ColumnMajorAdapter3x3(const_array);
 
@@ -1054,29 +1068,33 @@
     for (int j = 0; j < 3; ++j) {
       // The values are integers from 1 to 9, so equality tests are
       // appropriate even for float and double values.
-      EXPECT_EQ(A(i, j), array[3*j+i]);
-      EXPECT_EQ(B(i, j), const_array[3*j+i]);
+      EXPECT_EQ(A(i, j), array[3 * j + i]);
+      EXPECT_EQ(B(i, j), const_array[3 * j + i]);
     }
   }
 }
 
 TEST(MatrixAdapter, RowMajor2x4IsCorrect) {
-  const int expected[8] = { 1, 2, 3, 4, 5, 6, 7, 8 };
+  const int expected[8] = {1, 2, 3, 4, 5, 6, 7, 8};
   int array[8];
   MatrixAdapter<int, 4, 1> M(array);
+  // clang-format off
   M(0, 0) = 1; M(0, 1) = 2; M(0, 2) = 3; M(0, 3) = 4;
   M(1, 0) = 5; M(1, 1) = 6; M(1, 2) = 7; M(1, 3) = 8;
+  // clang-format on
   for (int k = 0; k < 8; ++k) {
     EXPECT_EQ(array[k], expected[k]);
   }
 }
 
 TEST(MatrixAdapter, ColumnMajor2x4IsCorrect) {
-  const int expected[8] = { 1, 5, 2, 6, 3, 7, 4, 8 };
+  const int expected[8] = {1, 5, 2, 6, 3, 7, 4, 8};
   int array[8];
   MatrixAdapter<int, 1, 2> M(array);
+  // clang-format off
   M(0, 0) = 1; M(0, 1) = 2; M(0, 2) = 3; M(0, 3) = 4;
   M(1, 0) = 5; M(1, 1) = 6; M(1, 2) = 7; M(1, 3) = 8;
+  // clang-format on
   for (int k = 0; k < 8; ++k) {
     EXPECT_EQ(array[k], expected[k]);
   }
@@ -1084,11 +1102,13 @@
 
 TEST(RotationMatrixToAngleAxis, NearPiExampleOneFromTobiasStrauss) {
   // Example from Tobias Strauss
+  // clang-format off
   const double rotation_matrix[] = {
     -0.999807135425239,    -0.0128154391194470,   -0.0148814136745799,
     -0.0128154391194470,   -0.148441438622958,     0.988838158557669,
     -0.0148814136745799,    0.988838158557669,     0.148248574048196
   };
+  // clang-format on
 
   double angle_axis[3];
   RotationMatrixToAngleAxis(RowMajorAdapter3x3(rotation_matrix), angle_axis);
diff --git a/internal/ceres/schur_complement_solver.cc b/internal/ceres/schur_complement_solver.cc
index 0083300..65e7854 100644
--- a/internal/ceres/schur_complement_solver.cc
+++ b/internal/ceres/schur_complement_solver.cc
@@ -139,10 +139,8 @@
     //
     // TODO(sameeragarwal): A more scalable template specialization
     // mechanism that does not cause binary bloat.
-    if (options_.row_block_size == 2 &&
-        options_.e_block_size == 3 &&
-        options_.f_block_size == 6 &&
-        num_f_blocks == 1) {
+    if (options_.row_block_size == 2 && options_.e_block_size == 3 &&
+        options_.f_block_size == 6 && num_f_blocks == 1) {
       eliminator_.reset(new SchurEliminatorForOneFBlock<2, 3, 6>);
     } else {
       eliminator_.reset(SchurEliminatorBase::Create(options_));
diff --git a/internal/ceres/schur_complement_solver.h b/internal/ceres/schur_complement_solver.h
index 87f0478..464af09 100644
--- a/internal/ceres/schur_complement_solver.h
+++ b/internal/ceres/schur_complement_solver.h
@@ -46,8 +46,8 @@
 #include "ceres/types.h"
 
 #ifdef CERES_USE_EIGEN_SPARSE
-#include "Eigen/SparseCholesky"
 #include "Eigen/OrderingMethods"
+#include "Eigen/SparseCholesky"
 #endif
 
 namespace ceres {
@@ -179,8 +179,7 @@
       const LinearSolver::PerSolveOptions& per_solve_options,
       double* solution) final;
   LinearSolver::Summary SolveReducedLinearSystemUsingConjugateGradients(
-      const LinearSolver::PerSolveOptions& per_solve_options,
-      double* solution);
+      const LinearSolver::PerSolveOptions& per_solve_options, double* solution);
 
   // Size of the blocks in the Schur complement.
   std::vector<int> blocks_;
diff --git a/internal/ceres/schur_complement_solver_test.cc b/internal/ceres/schur_complement_solver_test.cc
index 23d3674..550733e 100644
--- a/internal/ceres/schur_complement_solver_test.cc
+++ b/internal/ceres/schur_complement_solver_test.cc
@@ -74,9 +74,8 @@
 
     std::unique_ptr<LinearSolver> qr(LinearSolver::Create(options));
 
-    TripletSparseMatrix triplet_A(A->num_rows(),
-                                  A->num_cols(),
-                                  A->num_nonzeros());
+    TripletSparseMatrix triplet_A(
+        A->num_rows(), A->num_cols(), A->num_nonzeros());
     A->ToTripletSparseMatrix(&triplet_A);
 
     // Gold standard solutions using dense QR factorization.
@@ -99,8 +98,8 @@
     SetUpFromProblemId(problem_id);
     LinearSolver::Options options;
     options.elimination_groups.push_back(num_eliminate_blocks);
-    options.elimination_groups.push_back(
-        A->block_structure()->cols.size() - num_eliminate_blocks);
+    options.elimination_groups.push_back(A->block_structure()->cols.size() -
+                                         num_eliminate_blocks);
     options.type = linear_solver_type;
     options.dense_linear_algebra_library_type =
         dense_linear_algebra_library_type;
@@ -127,7 +126,6 @@
     EXPECT_EQ(summary.termination_type, LINEAR_SOLVER_SUCCESS);
 
     if (regularization) {
-
       ASSERT_NEAR((sol_d - x).norm() / num_cols, 0, 1e-10)
           << "Regularized Expected solution: " << sol_d.transpose()
           << " Actual solution: " << x.transpose();
@@ -207,42 +205,40 @@
 #endif  // CERES_NO_SUITESPARSE
 
 #ifndef CERES_NO_CXSPARSE
-TEST_F(SchurComplementSolverTest,
-       SparseSchurWithCXSparseSmallProblem) {
+TEST_F(SchurComplementSolverTest, SparseSchurWithCXSparseSmallProblem) {
   ComputeAndCompareSolutions(2, false, SPARSE_SCHUR, EIGEN, CX_SPARSE, true);
   ComputeAndCompareSolutions(2, true, SPARSE_SCHUR, EIGEN, CX_SPARSE, true);
 }
 
-TEST_F(SchurComplementSolverTest,
-       SparseSchurWithCXSparseLargeProblem) {
+TEST_F(SchurComplementSolverTest, SparseSchurWithCXSparseLargeProblem) {
   ComputeAndCompareSolutions(3, false, SPARSE_SCHUR, EIGEN, CX_SPARSE, true);
   ComputeAndCompareSolutions(3, true, SPARSE_SCHUR, EIGEN, CX_SPARSE, true);
 }
 #endif  // CERES_NO_CXSPARSE
 
 #ifndef CERES_NO_ACCELERATE_SPARSE
-TEST_F(SchurComplementSolverTest,
-       SparseSchurWithAccelerateSparseSmallProblem) {
-  ComputeAndCompareSolutions(2, false, SPARSE_SCHUR, EIGEN, ACCELERATE_SPARSE, true);
-  ComputeAndCompareSolutions(2, true, SPARSE_SCHUR, EIGEN, ACCELERATE_SPARSE, true);
+TEST_F(SchurComplementSolverTest, SparseSchurWithAccelerateSparseSmallProblem) {
+  ComputeAndCompareSolutions(
+      2, false, SPARSE_SCHUR, EIGEN, ACCELERATE_SPARSE, true);
+  ComputeAndCompareSolutions(
+      2, true, SPARSE_SCHUR, EIGEN, ACCELERATE_SPARSE, true);
 }
 
-TEST_F(SchurComplementSolverTest,
-       SparseSchurWithAccelerateSparseLargeProblem) {
-  ComputeAndCompareSolutions(3, false, SPARSE_SCHUR, EIGEN, ACCELERATE_SPARSE, true);
-  ComputeAndCompareSolutions(3, true, SPARSE_SCHUR, EIGEN, ACCELERATE_SPARSE, true);
+TEST_F(SchurComplementSolverTest, SparseSchurWithAccelerateSparseLargeProblem) {
+  ComputeAndCompareSolutions(
+      3, false, SPARSE_SCHUR, EIGEN, ACCELERATE_SPARSE, true);
+  ComputeAndCompareSolutions(
+      3, true, SPARSE_SCHUR, EIGEN, ACCELERATE_SPARSE, true);
 }
 #endif  // CERES_NO_ACCELERATE_SPARSE
 
 #ifdef CERES_USE_EIGEN_SPARSE
-TEST_F(SchurComplementSolverTest,
-       SparseSchurWithEigenSparseSmallProblem) {
+TEST_F(SchurComplementSolverTest, SparseSchurWithEigenSparseSmallProblem) {
   ComputeAndCompareSolutions(2, false, SPARSE_SCHUR, EIGEN, EIGEN_SPARSE, true);
   ComputeAndCompareSolutions(2, true, SPARSE_SCHUR, EIGEN, EIGEN_SPARSE, true);
 }
 
-TEST_F(SchurComplementSolverTest,
-       SparseSchurWithEigenSparseLargeProblem) {
+TEST_F(SchurComplementSolverTest, SparseSchurWithEigenSparseLargeProblem) {
   ComputeAndCompareSolutions(3, false, SPARSE_SCHUR, EIGEN, EIGEN_SPARSE, true);
   ComputeAndCompareSolutions(3, true, SPARSE_SCHUR, EIGEN, EIGEN_SPARSE, true);
 }
diff --git a/internal/ceres/schur_eliminator_impl.h b/internal/ceres/schur_eliminator_impl.h
index bd0881e..1f0b4fa 100644
--- a/internal/ceres/schur_eliminator_impl.h
+++ b/internal/ceres/schur_eliminator_impl.h
@@ -46,7 +46,9 @@
 #define EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD 10
 
 // This include must come before any #ifndef check on Ceres compile options.
+// clang-format off
 #include "ceres/internal/port.h"
+// clang-format on
 
 #include <algorithm>
 #include <map>
@@ -152,7 +154,7 @@
       ++chunk.size;
     }
 
-    CHECK_GT(chunk.size, 0); // This check will need to be resolved.
+    CHECK_GT(chunk.size, 0);  // This check will need to be resolved.
     r += chunk.size;
   }
   const Chunk& chunk = chunks_.back();
@@ -174,13 +176,12 @@
 }
 
 template <int kRowBlockSize, int kEBlockSize, int kFBlockSize>
-void
-SchurEliminator<kRowBlockSize, kEBlockSize, kFBlockSize>::
-Eliminate(const BlockSparseMatrixData& A,
-          const double* b,
-          const double* D,
-          BlockRandomAccessMatrix* lhs,
-          double* rhs) {
+void SchurEliminator<kRowBlockSize, kEBlockSize, kFBlockSize>::Eliminate(
+    const BlockSparseMatrixData& A,
+    const double* b,
+    const double* D,
+    BlockRandomAccessMatrix* lhs,
+    double* rhs) {
   if (lhs->num_rows() > 0) {
     lhs->SetZero();
     if (rhs) {
@@ -193,27 +194,26 @@
 
   // Add the diagonal to the schur complement.
   if (D != NULL) {
-    ParallelFor(
-        context_,
-        num_eliminate_blocks_,
-        num_col_blocks,
-        num_threads_,
-        [&](int i) {
-          const int block_id = i - num_eliminate_blocks_;
-          int r, c, row_stride, col_stride;
-          CellInfo* cell_info = lhs->GetCell(block_id, block_id, &r, &c,
-                                             &row_stride, &col_stride);
-          if (cell_info != NULL) {
-            const int block_size = bs->cols[i].size;
-            typename EigenTypes<Eigen::Dynamic>::ConstVectorRef diag(
-                D + bs->cols[i].position, block_size);
+    ParallelFor(context_,
+                num_eliminate_blocks_,
+                num_col_blocks,
+                num_threads_,
+                [&](int i) {
+                  const int block_id = i - num_eliminate_blocks_;
+                  int r, c, row_stride, col_stride;
+                  CellInfo* cell_info = lhs->GetCell(
+                      block_id, block_id, &r, &c, &row_stride, &col_stride);
+                  if (cell_info != NULL) {
+                    const int block_size = bs->cols[i].size;
+                    typename EigenTypes<Eigen::Dynamic>::ConstVectorRef diag(
+                        D + bs->cols[i].position, block_size);
 
-            std::lock_guard<std::mutex> l(cell_info->m);
-            MatrixRef m(cell_info->values, row_stride, col_stride);
-            m.block(r, c, block_size, block_size).diagonal() +=
-                diag.array().square().matrix();
-          }
-        });
+                    std::lock_guard<std::mutex> l(cell_info->m);
+                    MatrixRef m(cell_info->values, row_stride, col_stride);
+                    m.block(r, c, block_size, block_size).diagonal() +=
+                        diag.array().square().matrix();
+                  }
+                });
   }
 
   // Eliminate y blocks one chunk at a time.  For each chunk, compute
@@ -242,12 +242,12 @@
 
         VectorRef(buffer, buffer_size_).setZero();
 
-        typename EigenTypes<kEBlockSize, kEBlockSize>::Matrix
-            ete(e_block_size, e_block_size);
+        typename EigenTypes<kEBlockSize, kEBlockSize>::Matrix ete(e_block_size,
+                                                                  e_block_size);
 
         if (D != NULL) {
-          const typename EigenTypes<kEBlockSize>::ConstVectorRef
-              diag(D + bs->cols[e_block_id].position, e_block_size);
+          const typename EigenTypes<kEBlockSize>::ConstVectorRef diag(
+              D + bs->cols[e_block_id].position, e_block_size);
           ete = diag.array().square().matrix().asDiagonal();
         } else {
           ete.setZero();
@@ -299,31 +299,25 @@
 
         // S -= F'E(E'E)^{-1}E'F
         ChunkOuterProduct(
-        thread_id, bs, inverse_ete, buffer, chunk.buffer_layout, lhs);
+            thread_id, bs, inverse_ete, buffer, chunk.buffer_layout, lhs);
       });
 
   // For rows with no e_blocks, the schur complement update reduces to
   // S += F'F.
-  NoEBlockRowsUpdate(A, b,  uneliminated_row_begins_, lhs, rhs);
+  NoEBlockRowsUpdate(A, b, uneliminated_row_begins_, lhs, rhs);
 }
 
 template <int kRowBlockSize, int kEBlockSize, int kFBlockSize>
-void
-SchurEliminator<kRowBlockSize, kEBlockSize, kFBlockSize>::
-BackSubstitute(const BlockSparseMatrixData& A,
-               const double* b,
-               const double* D,
-               const double* z,
-               double* y) {
+void SchurEliminator<kRowBlockSize, kEBlockSize, kFBlockSize>::BackSubstitute(
+    const BlockSparseMatrixData& A,
+    const double* b,
+    const double* D,
+    const double* z,
+    double* y) {
   const CompressedRowBlockStructure* bs = A.block_structure();
   const double* values = A.values();
 
-  ParallelFor(
-      context_,
-      0,
-      int(chunks_.size()),
-      num_threads_,
-      [&](int i) {
+  ParallelFor(context_, 0, int(chunks_.size()), num_threads_, [&](int i) {
     const Chunk& chunk = chunks_[i];
     const int e_block_id = bs->rows[chunk.start].cells.front().block_id;
     const int e_block_size = bs->cols[e_block_id].size;
@@ -331,11 +325,11 @@
     double* y_ptr = y + bs->cols[e_block_id].position;
     typename EigenTypes<kEBlockSize>::VectorRef y_block(y_ptr, e_block_size);
 
-    typename EigenTypes<kEBlockSize, kEBlockSize>::Matrix
-        ete(e_block_size, e_block_size);
+    typename EigenTypes<kEBlockSize, kEBlockSize>::Matrix ete(e_block_size,
+                                                              e_block_size);
     if (D != NULL) {
-      const typename EigenTypes<kEBlockSize>::ConstVectorRef
-          diag(D + bs->cols[e_block_id].position, e_block_size);
+      const typename EigenTypes<kEBlockSize>::ConstVectorRef diag(
+          D + bs->cols[e_block_id].position, e_block_size);
       ete = diag.array().square().matrix().asDiagonal();
     } else {
       ete.setZero();
@@ -357,6 +351,7 @@
         const int f_block_size = bs->cols[f_block_id].size;
         const int r_block = f_block_id - num_eliminate_blocks_;
 
+        // clang-format off
         MatrixVectorMultiply<kRowBlockSize, kFBlockSize, -1>(
             values + row.cells[c].position, row.block.size, f_block_size,
             z + lhs_row_layout_[r_block],
@@ -373,6 +368,7 @@
           values + e_cell.position, row.block.size, e_block_size,
           values + e_cell.position, row.block.size, e_block_size,
           ete.data(), 0, 0, e_block_size, e_block_size);
+      // clang-format on
     }
 
     y_block =
@@ -384,14 +380,13 @@
 //
 //   F'b - F'E(E'E)^(-1) E'b
 template <int kRowBlockSize, int kEBlockSize, int kFBlockSize>
-void
-SchurEliminator<kRowBlockSize, kEBlockSize, kFBlockSize>::
-UpdateRhs(const Chunk& chunk,
-          const BlockSparseMatrixData& A,
-          const double* b,
-          int row_block_counter,
-          const double* inverse_ete_g,
-          double* rhs) {
+void SchurEliminator<kRowBlockSize, kEBlockSize, kFBlockSize>::UpdateRhs(
+    const Chunk& chunk,
+    const BlockSparseMatrixData& A,
+    const double* b,
+    int row_block_counter,
+    const double* inverse_ete_g,
+    double* rhs) {
   const CompressedRowBlockStructure* bs = A.block_structure();
   const double* values = A.values();
 
@@ -403,22 +398,26 @@
     const Cell& e_cell = row.cells.front();
 
     typename EigenTypes<kRowBlockSize>::Vector sj =
-        typename EigenTypes<kRowBlockSize>::ConstVectorRef
-        (b + b_pos, row.block.size);
+        typename EigenTypes<kRowBlockSize>::ConstVectorRef(b + b_pos,
+                                                           row.block.size);
 
+    // clang-format off
     MatrixVectorMultiply<kRowBlockSize, kEBlockSize, -1>(
         values + e_cell.position, row.block.size, e_block_size,
         inverse_ete_g, sj.data());
+    // clang-format on
 
     for (int c = 1; c < row.cells.size(); ++c) {
       const int block_id = row.cells[c].block_id;
       const int block_size = bs->cols[block_id].size;
       const int block = block_id - num_eliminate_blocks_;
       std::lock_guard<std::mutex> l(*rhs_locks_[block]);
+      // clang-format off
       MatrixTransposeVectorMultiply<kRowBlockSize, kFBlockSize, 1>(
           values + row.cells[c].position,
           row.block.size, block_size,
           sj.data(), rhs + lhs_row_layout_[block]);
+      // clang-format on
     }
     b_pos += row.block.size;
   }
@@ -444,17 +443,16 @@
 //
 // and the gradient of the e_block, E'b.
 template <int kRowBlockSize, int kEBlockSize, int kFBlockSize>
-void
-SchurEliminator<kRowBlockSize, kEBlockSize, kFBlockSize>::
-ChunkDiagonalBlockAndGradient(
-    const Chunk& chunk,
-    const BlockSparseMatrixData& A,
-    const double* b,
-    int row_block_counter,
-    typename EigenTypes<kEBlockSize, kEBlockSize>::Matrix* ete,
-    double* g,
-    double* buffer,
-    BlockRandomAccessMatrix* lhs) {
+void SchurEliminator<kRowBlockSize, kEBlockSize, kFBlockSize>::
+    ChunkDiagonalBlockAndGradient(
+        const Chunk& chunk,
+        const BlockSparseMatrixData& A,
+        const double* b,
+        int row_block_counter,
+        typename EigenTypes<kEBlockSize, kEBlockSize>::Matrix* ete,
+        double* g,
+        double* buffer,
+        BlockRandomAccessMatrix* lhs) {
   const CompressedRowBlockStructure* bs = A.block_structure();
   const double* values = A.values();
 
@@ -474,18 +472,22 @@
 
     // Extract the e_block, ETE += E_i' E_i
     const Cell& e_cell = row.cells.front();
+    // clang-format off
     MatrixTransposeMatrixMultiply
         <kRowBlockSize, kEBlockSize, kRowBlockSize, kEBlockSize, 1>(
             values + e_cell.position, row.block.size, e_block_size,
             values + e_cell.position, row.block.size, e_block_size,
             ete->data(), 0, 0, e_block_size, e_block_size);
+    // clang-format on
 
     if (b) {
       // g += E_i' b_i
+      // clang-format off
       MatrixTransposeVectorMultiply<kRowBlockSize, kEBlockSize, 1>(
           values + e_cell.position, row.block.size, e_block_size,
           b + b_pos,
           g);
+      // clang-format on
     }
 
     // buffer = E'F. This computation is done by iterating over the
@@ -493,13 +495,14 @@
     for (int c = 1; c < row.cells.size(); ++c) {
       const int f_block_id = row.cells[c].block_id;
       const int f_block_size = bs->cols[f_block_id].size;
-      double* buffer_ptr =
-          buffer +  FindOrDie(chunk.buffer_layout, f_block_id);
+      double* buffer_ptr = buffer + FindOrDie(chunk.buffer_layout, f_block_id);
+      // clang-format off
       MatrixTransposeMatrixMultiply
           <kRowBlockSize, kEBlockSize, kRowBlockSize, kFBlockSize, 1>(
           values + e_cell.position, row.block.size, e_block_size,
           values + row.cells[c].position, row.block.size, f_block_size,
           buffer_ptr, 0, 0, e_block_size, f_block_size);
+      // clang-format on
     }
     b_pos += row.block.size;
   }
@@ -510,14 +513,13 @@
 //
 //  S -= F'E(E'E)^{-1}E'F.
 template <int kRowBlockSize, int kEBlockSize, int kFBlockSize>
-void
-SchurEliminator<kRowBlockSize, kEBlockSize, kFBlockSize>::
-ChunkOuterProduct(int thread_id,
-                  const CompressedRowBlockStructure* bs,
-                  const Matrix& inverse_ete,
-                  const double* buffer,
-                  const BufferLayoutType& buffer_layout,
-                  BlockRandomAccessMatrix* lhs) {
+void SchurEliminator<kRowBlockSize, kEBlockSize, kFBlockSize>::
+    ChunkOuterProduct(int thread_id,
+                      const CompressedRowBlockStructure* bs,
+                      const Matrix& inverse_ete,
+                      const double* buffer,
+                      const BufferLayoutType& buffer_layout,
+                      BlockRandomAccessMatrix* lhs) {
   // This is the most computationally expensive part of this
   // code. Profiling experiments reveal that the bottleneck is not the
   // computation of the right-hand matrix product, but memory
@@ -532,28 +534,31 @@
   for (; it1 != buffer_layout.end(); ++it1) {
     const int block1 = it1->first - num_eliminate_blocks_;
     const int block1_size = bs->cols[it1->first].size;
+    // clang-format off
     MatrixTransposeMatrixMultiply
         <kEBlockSize, kFBlockSize, kEBlockSize, kEBlockSize, 0>(
         buffer + it1->second, e_block_size, block1_size,
         inverse_ete.data(), e_block_size, e_block_size,
         b1_transpose_inverse_ete, 0, 0, block1_size, e_block_size);
+    // clang-format on
 
     BufferLayoutType::const_iterator it2 = it1;
     for (; it2 != buffer_layout.end(); ++it2) {
       const int block2 = it2->first - num_eliminate_blocks_;
 
       int r, c, row_stride, col_stride;
-      CellInfo* cell_info = lhs->GetCell(block1, block2,
-                                         &r, &c,
-                                         &row_stride, &col_stride);
+      CellInfo* cell_info =
+          lhs->GetCell(block1, block2, &r, &c, &row_stride, &col_stride);
       if (cell_info != NULL) {
         const int block2_size = bs->cols[it2->first].size;
         std::lock_guard<std::mutex> l(cell_info->m);
+        // clang-format off
         MatrixMatrixMultiply
             <kFBlockSize, kEBlockSize, kEBlockSize, kFBlockSize, -1>(
                 b1_transpose_inverse_ete, block1_size, e_block_size,
                 buffer  + it2->second, e_block_size, block2_size,
                 cell_info->values, r, c, row_stride, col_stride);
+        // clang-format on
       }
     }
   }
@@ -563,13 +568,12 @@
 // += F'F. This function iterates over the rows of A with no e_block,
 // and calls NoEBlockRowOuterProduct on each row.
 template <int kRowBlockSize, int kEBlockSize, int kFBlockSize>
-void
-SchurEliminator<kRowBlockSize, kEBlockSize, kFBlockSize>::
-NoEBlockRowsUpdate(const BlockSparseMatrixData& A,
-                   const double* b,
-                   int row_block_counter,
-                   BlockRandomAccessMatrix* lhs,
-                   double* rhs) {
+void SchurEliminator<kRowBlockSize, kEBlockSize, kFBlockSize>::
+    NoEBlockRowsUpdate(const BlockSparseMatrixData& A,
+                       const double* b,
+                       int row_block_counter,
+                       BlockRandomAccessMatrix* lhs,
+                       double* rhs) {
   const CompressedRowBlockStructure* bs = A.block_structure();
   const double* values = A.values();
   for (; row_block_counter < bs->rows.size(); ++row_block_counter) {
@@ -582,15 +586,16 @@
       const int block_id = row.cells[c].block_id;
       const int block_size = bs->cols[block_id].size;
       const int block = block_id - num_eliminate_blocks_;
+      // clang-format off
       MatrixTransposeVectorMultiply<Eigen::Dynamic, Eigen::Dynamic, 1>(
           values + row.cells[c].position, row.block.size, block_size,
           b + row.block.position,
           rhs + lhs_row_layout_[block]);
+      // clang-format on
     }
   }
 }
 
-
 // A row r of A, which has no e_blocks gets added to the Schur
 // Complement as S += r r'. This function is responsible for computing
 // the contribution of a single row r to the Schur complement. It is
@@ -606,11 +611,10 @@
 // dynamic. Since the number of rows without e_blocks is small, the
 // lack of templating is not an issue.
 template <int kRowBlockSize, int kEBlockSize, int kFBlockSize>
-void
-SchurEliminator<kRowBlockSize, kEBlockSize, kFBlockSize>::
-NoEBlockRowOuterProduct(const BlockSparseMatrixData& A,
-                        int row_block_index,
-                        BlockRandomAccessMatrix* lhs) {
+void SchurEliminator<kRowBlockSize, kEBlockSize, kFBlockSize>::
+    NoEBlockRowOuterProduct(const BlockSparseMatrixData& A,
+                            int row_block_index,
+                            BlockRandomAccessMatrix* lhs) {
   const CompressedRowBlockStructure* bs = A.block_structure();
   const double* values = A.values();
 
@@ -621,18 +625,19 @@
 
     const int block1_size = bs->cols[row.cells[i].block_id].size;
     int r, c, row_stride, col_stride;
-    CellInfo* cell_info = lhs->GetCell(block1, block1,
-                                       &r, &c,
-                                       &row_stride, &col_stride);
+    CellInfo* cell_info =
+        lhs->GetCell(block1, block1, &r, &c, &row_stride, &col_stride);
     if (cell_info != NULL) {
       std::lock_guard<std::mutex> l(cell_info->m);
       // This multiply currently ignores the fact that this is a
       // symmetric outer product.
+      // clang-format off
       MatrixTransposeMatrixMultiply
           <Eigen::Dynamic, Eigen::Dynamic, Eigen::Dynamic, Eigen::Dynamic, 1>(
               values + row.cells[i].position, row.block.size, block1_size,
               values + row.cells[i].position, row.block.size, block1_size,
               cell_info->values, r, c, row_stride, col_stride);
+      // clang-format on
     }
 
     for (int j = i + 1; j < row.cells.size(); ++j) {
@@ -640,17 +645,18 @@
       DCHECK_GE(block2, 0);
       DCHECK_LT(block1, block2);
       int r, c, row_stride, col_stride;
-      CellInfo* cell_info = lhs->GetCell(block1, block2,
-                                         &r, &c,
-                                         &row_stride, &col_stride);
+      CellInfo* cell_info =
+          lhs->GetCell(block1, block2, &r, &c, &row_stride, &col_stride);
       if (cell_info != NULL) {
         const int block2_size = bs->cols[row.cells[j].block_id].size;
         std::lock_guard<std::mutex> l(cell_info->m);
+        // clang-format off
         MatrixTransposeMatrixMultiply
             <Eigen::Dynamic, Eigen::Dynamic, Eigen::Dynamic, Eigen::Dynamic, 1>(
                 values + row.cells[i].position, row.block.size, block1_size,
                 values + row.cells[j].position, row.block.size, block2_size,
                 cell_info->values, r, c, row_stride, col_stride);
+        // clang-format on
       }
     }
   }
@@ -660,11 +666,10 @@
 // function has the same structure as NoEBlockRowOuterProduct, except
 // that this function uses the template parameters.
 template <int kRowBlockSize, int kEBlockSize, int kFBlockSize>
-void
-SchurEliminator<kRowBlockSize, kEBlockSize, kFBlockSize>::
-EBlockRowOuterProduct(const BlockSparseMatrixData& A,
-                      int row_block_index,
-                      BlockRandomAccessMatrix* lhs) {
+void SchurEliminator<kRowBlockSize, kEBlockSize, kFBlockSize>::
+    EBlockRowOuterProduct(const BlockSparseMatrixData& A,
+                          int row_block_index,
+                          BlockRandomAccessMatrix* lhs) {
   const CompressedRowBlockStructure* bs = A.block_structure();
   const double* values = A.values();
 
@@ -675,17 +680,18 @@
 
     const int block1_size = bs->cols[row.cells[i].block_id].size;
     int r, c, row_stride, col_stride;
-    CellInfo* cell_info = lhs->GetCell(block1, block1,
-                                       &r, &c,
-                                       &row_stride, &col_stride);
+    CellInfo* cell_info =
+        lhs->GetCell(block1, block1, &r, &c, &row_stride, &col_stride);
     if (cell_info != NULL) {
       std::lock_guard<std::mutex> l(cell_info->m);
       // block += b1.transpose() * b1;
+      // clang-format off
       MatrixTransposeMatrixMultiply
           <kRowBlockSize, kFBlockSize, kRowBlockSize, kFBlockSize, 1>(
           values + row.cells[i].position, row.block.size, block1_size,
           values + row.cells[i].position, row.block.size, block1_size,
           cell_info->values, r, c, row_stride, col_stride);
+      // clang-format on
     }
 
     for (int j = i + 1; j < row.cells.size(); ++j) {
@@ -694,17 +700,18 @@
       DCHECK_LT(block1, block2);
       const int block2_size = bs->cols[row.cells[j].block_id].size;
       int r, c, row_stride, col_stride;
-      CellInfo* cell_info = lhs->GetCell(block1, block2,
-                                         &r, &c,
-                                         &row_stride, &col_stride);
+      CellInfo* cell_info =
+          lhs->GetCell(block1, block2, &r, &c, &row_stride, &col_stride);
       if (cell_info != NULL) {
         // block += b1.transpose() * b2;
         std::lock_guard<std::mutex> l(cell_info->m);
+        // clang-format off
         MatrixTransposeMatrixMultiply
             <kRowBlockSize, kFBlockSize, kRowBlockSize, kFBlockSize, 1>(
                 values + row.cells[i].position, row.block.size, block1_size,
                 values + row.cells[j].position, row.block.size, block2_size,
                 cell_info->values, r, c, row_stride, col_stride);
+        // clang-format on
       }
     }
   }
diff --git a/internal/ceres/scratch_evaluate_preparer.cc b/internal/ceres/scratch_evaluate_preparer.cc
index f01ef11..9905b22 100644
--- a/internal/ceres/scratch_evaluate_preparer.cc
+++ b/internal/ceres/scratch_evaluate_preparer.cc
@@ -37,9 +37,8 @@
 namespace ceres {
 namespace internal {
 
-ScratchEvaluatePreparer* ScratchEvaluatePreparer::Create(
-    const Program &program,
-    int num_threads) {
+ScratchEvaluatePreparer* ScratchEvaluatePreparer::Create(const Program& program,
+                                                         int num_threads) {
   ScratchEvaluatePreparer* preparers = new ScratchEvaluatePreparer[num_threads];
   int max_derivatives_per_residual_block =
       program.MaxDerivativesPerResidualBlock();
@@ -50,8 +49,7 @@
 }
 
 void ScratchEvaluatePreparer::Init(int max_derivatives_per_residual_block) {
-  jacobian_scratch_.reset(
-      new double[max_derivatives_per_residual_block]);
+  jacobian_scratch_.reset(new double[max_derivatives_per_residual_block]);
 }
 
 // Point the jacobian blocks into the scratch area of this evaluate preparer.
diff --git a/internal/ceres/scratch_evaluate_preparer.h b/internal/ceres/scratch_evaluate_preparer.h
index c8d9b93..2d2745d 100644
--- a/internal/ceres/scratch_evaluate_preparer.h
+++ b/internal/ceres/scratch_evaluate_preparer.h
@@ -47,7 +47,7 @@
 class ScratchEvaluatePreparer {
  public:
   // Create num_threads ScratchEvaluatePreparers.
-  static ScratchEvaluatePreparer* Create(const Program &program,
+  static ScratchEvaluatePreparer* Create(const Program& program,
                                          int num_threads);
 
   // EvaluatePreparer interface
diff --git a/internal/ceres/single_linkage_clustering.cc b/internal/ceres/single_linkage_clustering.cc
index 394492c..0e78131 100644
--- a/internal/ceres/single_linkage_clustering.cc
+++ b/internal/ceres/single_linkage_clustering.cc
@@ -30,8 +30,9 @@
 
 #include "ceres/single_linkage_clustering.h"
 
-#include <unordered_set>
 #include <unordered_map>
+#include <unordered_set>
+
 #include "ceres/graph.h"
 #include "ceres/graph_algorithms.h"
 
diff --git a/internal/ceres/single_linkage_clustering.h b/internal/ceres/single_linkage_clustering.h
index ccd6f8e..ef6bff4 100644
--- a/internal/ceres/single_linkage_clustering.h
+++ b/internal/ceres/single_linkage_clustering.h
@@ -32,6 +32,7 @@
 #define CERES_INTERNAL_SINGLE_LINKAGE_CLUSTERING_H_
 
 #include <unordered_map>
+
 #include "ceres/graph.h"
 
 namespace ceres {
diff --git a/internal/ceres/single_linkage_clustering_test.cc b/internal/ceres/single_linkage_clustering_test.cc
index 281c281..28c7c41 100644
--- a/internal/ceres/single_linkage_clustering_test.cc
+++ b/internal/ceres/single_linkage_clustering_test.cc
@@ -31,6 +31,7 @@
 #include "ceres/single_linkage_clustering.h"
 
 #include <unordered_map>
+
 #include "ceres/graph.h"
 #include "gtest/gtest.h"
 
diff --git a/internal/ceres/small_blas.h b/internal/ceres/small_blas.h
index 81c5872..4ee9229 100644
--- a/internal/ceres/small_blas.h
+++ b/internal/ceres/small_blas.h
@@ -35,8 +35,8 @@
 #ifndef CERES_INTERNAL_SMALL_BLAS_H_
 #define CERES_INTERNAL_SMALL_BLAS_H_
 
-#include "ceres/internal/port.h"
 #include "ceres/internal/eigen.h"
+#include "ceres/internal/port.h"
 #include "glog/logging.h"
 #include "small_blas_generic.h"
 
@@ -46,7 +46,7 @@
 // The following three macros are used to share code and reduce
 // template junk across the various GEMM variants.
 #define CERES_GEMM_BEGIN(name)                                          \
-  template<int kRowA, int kColA, int kRowB, int kColB, int kOperation>  \
+  template <int kRowA, int kColA, int kRowB, int kColB, int kOperation> \
   inline void name(const double* A,                                     \
                    const int num_row_a,                                 \
                    const int num_col_a,                                 \
@@ -59,56 +59,58 @@
                    const int row_stride_c,                              \
                    const int col_stride_c)
 
-#define CERES_GEMM_NAIVE_HEADER                                         \
-  DCHECK_GT(num_row_a, 0);                                              \
-  DCHECK_GT(num_col_a, 0);                                              \
-  DCHECK_GT(num_row_b, 0);                                              \
-  DCHECK_GT(num_col_b, 0);                                              \
-  DCHECK_GE(start_row_c, 0);                                            \
-  DCHECK_GE(start_col_c, 0);                                            \
-  DCHECK_GT(row_stride_c, 0);                                           \
-  DCHECK_GT(col_stride_c, 0);                                           \
-  DCHECK((kRowA == Eigen::Dynamic) || (kRowA == num_row_a));            \
-  DCHECK((kColA == Eigen::Dynamic) || (kColA == num_col_a));            \
-  DCHECK((kRowB == Eigen::Dynamic) || (kRowB == num_row_b));            \
-  DCHECK((kColB == Eigen::Dynamic) || (kColB == num_col_b));            \
-  const int NUM_ROW_A = (kRowA != Eigen::Dynamic ? kRowA : num_row_a);  \
-  const int NUM_COL_A = (kColA != Eigen::Dynamic ? kColA : num_col_a);  \
-  const int NUM_ROW_B = (kRowB != Eigen::Dynamic ? kRowB : num_row_b);  \
+#define CERES_GEMM_NAIVE_HEADER                                        \
+  DCHECK_GT(num_row_a, 0);                                             \
+  DCHECK_GT(num_col_a, 0);                                             \
+  DCHECK_GT(num_row_b, 0);                                             \
+  DCHECK_GT(num_col_b, 0);                                             \
+  DCHECK_GE(start_row_c, 0);                                           \
+  DCHECK_GE(start_col_c, 0);                                           \
+  DCHECK_GT(row_stride_c, 0);                                          \
+  DCHECK_GT(col_stride_c, 0);                                          \
+  DCHECK((kRowA == Eigen::Dynamic) || (kRowA == num_row_a));           \
+  DCHECK((kColA == Eigen::Dynamic) || (kColA == num_col_a));           \
+  DCHECK((kRowB == Eigen::Dynamic) || (kRowB == num_row_b));           \
+  DCHECK((kColB == Eigen::Dynamic) || (kColB == num_col_b));           \
+  const int NUM_ROW_A = (kRowA != Eigen::Dynamic ? kRowA : num_row_a); \
+  const int NUM_COL_A = (kColA != Eigen::Dynamic ? kColA : num_col_a); \
+  const int NUM_ROW_B = (kRowB != Eigen::Dynamic ? kRowB : num_row_b); \
   const int NUM_COL_B = (kColB != Eigen::Dynamic ? kColB : num_col_b);
 
-#define CERES_GEMM_EIGEN_HEADER                                         \
-  const typename EigenTypes<kRowA, kColA>::ConstMatrixRef               \
-  Aref(A, num_row_a, num_col_a);                                        \
-  const typename EigenTypes<kRowB, kColB>::ConstMatrixRef               \
-  Bref(B, num_row_b, num_col_b);                                        \
-  MatrixRef Cref(C, row_stride_c, col_stride_c);                        \
+#define CERES_GEMM_EIGEN_HEADER                                 \
+  const typename EigenTypes<kRowA, kColA>::ConstMatrixRef Aref( \
+      A, num_row_a, num_col_a);                                 \
+  const typename EigenTypes<kRowB, kColB>::ConstMatrixRef Bref( \
+      B, num_row_b, num_col_b);                                 \
+  MatrixRef Cref(C, row_stride_c, col_stride_c);
 
+// clang-format off
 #define CERES_CALL_GEMM(name)                                           \
   name<kRowA, kColA, kRowB, kColB, kOperation>(                         \
       A, num_row_a, num_col_a,                                          \
       B, num_row_b, num_col_b,                                          \
       C, start_row_c, start_col_c, row_stride_c, col_stride_c);
+// clang-format on
 
-#define CERES_GEMM_STORE_SINGLE(p, index, value)                        \
-  if (kOperation > 0) {                                                 \
-    p[index] += value;                                                  \
-  } else if (kOperation < 0) {                                          \
-    p[index] -= value;                                                  \
-  } else {                                                              \
-    p[index] = value;                                                   \
+#define CERES_GEMM_STORE_SINGLE(p, index, value) \
+  if (kOperation > 0) {                          \
+    p[index] += value;                           \
+  } else if (kOperation < 0) {                   \
+    p[index] -= value;                           \
+  } else {                                       \
+    p[index] = value;                            \
   }
 
-#define CERES_GEMM_STORE_PAIR(p, index, v1, v2)                         \
-  if (kOperation > 0) {                                                 \
-    p[index] += v1;                                                     \
-    p[index + 1] += v2;                                                 \
-  } else if (kOperation < 0) {                                          \
-    p[index] -= v1;                                                     \
-    p[index + 1] -= v2;                                                 \
-  } else {                                                              \
-    p[index] = v1;                                                      \
-    p[index + 1] = v2;                                                  \
+#define CERES_GEMM_STORE_PAIR(p, index, v1, v2) \
+  if (kOperation > 0) {                         \
+    p[index] += v1;                             \
+    p[index + 1] += v2;                         \
+  } else if (kOperation < 0) {                  \
+    p[index] -= v1;                             \
+    p[index + 1] -= v2;                         \
+  } else {                                      \
+    p[index] = v1;                              \
+    p[index + 1] = v2;                          \
   }
 
 // For the matrix-matrix functions below, there are three variants for
@@ -161,8 +163,8 @@
 //
 CERES_GEMM_BEGIN(MatrixMatrixMultiplyEigen) {
   CERES_GEMM_EIGEN_HEADER
-  Eigen::Block<MatrixRef, kRowA, kColB>
-    block(Cref, start_row_c, start_col_c, num_row_a, num_col_b);
+  Eigen::Block<MatrixRef, kRowA, kColB> block(
+      Cref, start_row_c, start_col_c, num_row_a, num_col_b);
 
   if (kOperation > 0) {
     block.noalias() += Aref * Bref;
@@ -208,7 +210,7 @@
 
   // Process the couple columns in remainder if present.
   if (NUM_COL_C & 2) {
-    int col = NUM_COL_C & (int)(~(span - 1)) ;
+    int col = NUM_COL_C & (int)(~(span - 1));
     const double* pa = &A[0];
     for (int row = 0; row < NUM_ROW_C; ++row, pa += NUM_COL_A) {
       const double* pb = &B[col];
@@ -234,11 +236,12 @@
   for (int col = 0; col < col_m; col += span) {
     for (int row = 0; row < NUM_ROW_C; ++row) {
       const int index = (row + start_row_c) * col_stride_c + start_col_c + col;
+      // clang-format off
       MMM_mat1x4(NUM_COL_A, &A[row * NUM_COL_A],
                  &B[col], NUM_COL_B, &C[index], kOperation);
+      // clang-format on
     }
   }
-
 }
 
 CERES_GEMM_BEGIN(MatrixMatrixMultiply) {
@@ -261,9 +264,11 @@
 
 CERES_GEMM_BEGIN(MatrixTransposeMatrixMultiplyEigen) {
   CERES_GEMM_EIGEN_HEADER
+  // clang-format off
   Eigen::Block<MatrixRef, kColA, kColB> block(Cref,
                                               start_row_c, start_col_c,
                                               num_col_a, num_col_b);
+  // clang-format on
   if (kOperation > 0) {
     block.noalias() += Aref.transpose() * Bref;
   } else if (kOperation < 0) {
@@ -310,7 +315,7 @@
 
   // Process the couple columns in remainder if present.
   if (NUM_COL_C & 2) {
-    int col = NUM_COL_C & (int)(~(span - 1)) ;
+    int col = NUM_COL_C & (int)(~(span - 1));
     for (int row = 0; row < NUM_ROW_C; ++row) {
       const double* pa = &A[row];
       const double* pb = &B[col];
@@ -338,11 +343,12 @@
   for (int col = 0; col < col_m; col += span) {
     for (int row = 0; row < NUM_ROW_C; ++row) {
       const int index = (row + start_row_c) * col_stride_c + start_col_c + col;
+      // clang-format off
       MTM_mat1x4(NUM_ROW_A, &A[row], NUM_COL_A,
                  &B[col], NUM_COL_B, &C[index], kOperation);
+      // clang-format on
     }
   }
-
 }
 
 CERES_GEMM_BEGIN(MatrixTransposeMatrixMultiply) {
@@ -376,15 +382,15 @@
 // kOperation =  1  -> c += A' * b
 // kOperation = -1  -> c -= A' * b
 // kOperation =  0  -> c  = A' * b
-template<int kRowA, int kColA, int kOperation>
+template <int kRowA, int kColA, int kOperation>
 inline void MatrixVectorMultiply(const double* A,
                                  const int num_row_a,
                                  const int num_col_a,
                                  const double* b,
                                  double* c) {
 #ifdef CERES_NO_CUSTOM_BLAS
-  const typename EigenTypes<kRowA, kColA>::ConstMatrixRef
-      Aref(A, num_row_a, num_col_a);
+  const typename EigenTypes<kRowA, kColA>::ConstMatrixRef Aref(
+      A, num_row_a, num_col_a);
   const typename EigenTypes<kColA>::ConstVectorRef bref(b, num_col_a);
   typename EigenTypes<kRowA>::VectorRef cref(c, num_row_a);
 
@@ -412,7 +418,7 @@
 
   // Process the last odd row if present.
   if (NUM_ROW_A & 1) {
-    int row  = NUM_ROW_A - 1;
+    int row = NUM_ROW_A - 1;
     const double* pa = &A[row * NUM_COL_A];
     const double* pb = &b[0];
     double tmp = 0.0;
@@ -450,8 +456,10 @@
   // Calculate the main part with multiples of 4.
   int row_m = NUM_ROW_A & (int)(~(span - 1));
   for (int row = 0; row < row_m; row += span) {
+    // clang-format off
     MVM_mat4x1(NUM_COL_A, &A[row * NUM_COL_A], NUM_COL_A,
                &b[0], &c[row], kOperation);
+    // clang-format on
   }
 
 #endif  // CERES_NO_CUSTOM_BLAS
@@ -460,15 +468,15 @@
 // Similar to MatrixVectorMultiply, except that A is transposed, i.e.,
 //
 // c op A' * b;
-template<int kRowA, int kColA, int kOperation>
+template <int kRowA, int kColA, int kOperation>
 inline void MatrixTransposeVectorMultiply(const double* A,
                                           const int num_row_a,
                                           const int num_col_a,
                                           const double* b,
                                           double* c) {
 #ifdef CERES_NO_CUSTOM_BLAS
-  const typename EigenTypes<kRowA, kColA>::ConstMatrixRef
-      Aref(A, num_row_a, num_col_a);
+  const typename EigenTypes<kRowA, kColA>::ConstMatrixRef Aref(
+      A, num_row_a, num_col_a);
   const typename EigenTypes<kRowA>::ConstVectorRef bref(b, num_row_a);
   typename EigenTypes<kColA>::VectorRef cref(c, num_col_a);
 
@@ -496,7 +504,7 @@
 
   // Process the last odd column if present.
   if (NUM_COL_A & 1) {
-    int row  = NUM_COL_A - 1;
+    int row = NUM_COL_A - 1;
     const double* pa = &A[row];
     const double* pb = &b[0];
     double tmp = 0.0;
@@ -519,10 +527,12 @@
     const double* pb = &b[0];
     double tmp1 = 0.0, tmp2 = 0.0;
     for (int col = 0; col < NUM_ROW_A; ++col) {
+      // clang-format off
       double bv = *pb++;
       tmp1 += *(pa    ) * bv;
       tmp2 += *(pa + 1) * bv;
       pa += NUM_COL_A;
+      // clang-format on
     }
     CERES_GEMM_STORE_PAIR(c, row, tmp1, tmp2);
 
@@ -535,8 +545,10 @@
   // Calculate the main part with multiples of 4.
   int row_m = NUM_COL_A & (int)(~(span - 1));
   for (int row = 0; row < row_m; row += span) {
+    // clang-format off
     MTV_mat4x1(NUM_ROW_A, &A[row], NUM_COL_A,
                &b[0], &c[row], kOperation);
+    // clang-format on
   }
 
 #endif  // CERES_NO_CUSTOM_BLAS
diff --git a/internal/ceres/small_blas_gemm_benchmark.cc b/internal/ceres/small_blas_gemm_benchmark.cc
index 0a760a5..aa6c41d 100644
--- a/internal/ceres/small_blas_gemm_benchmark.cc
+++ b/internal/ceres/small_blas_gemm_benchmark.cc
@@ -29,6 +29,7 @@
 // Authors: sameeragarwal@google.com (Sameer Agarwal)
 
 #include <iostream>
+
 #include "Eigen/Dense"
 #include "benchmark/benchmark.h"
 #include "ceres/small_blas.h"
@@ -103,11 +104,13 @@
   int iter = 0;
   for (auto _ : state) {
     // a += b * c
+    // clang-format off
     MatrixMatrixMultiply
         <Eigen::Dynamic, Eigen::Dynamic,Eigen::Dynamic,Eigen::Dynamic, 1>
         (data.GetB(iter), b_rows, b_cols,
          data.GetC(iter), c_rows, c_cols,
          data.GetA(iter), 0, 0, a_rows, a_cols);
+    // clang-format on
     iter = (iter + 1) % num_elements;
   }
 }
@@ -147,11 +150,13 @@
   int iter = 0;
   for (auto _ : state) {
     // a += b' * c
+    // clang-format off
     MatrixTransposeMatrixMultiply
         <Eigen::Dynamic,Eigen::Dynamic,Eigen::Dynamic,Eigen::Dynamic, 1>
         (data.GetB(iter), b_rows, b_cols,
          data.GetC(iter), c_rows, c_cols,
          data.GetA(iter), 0, 0, a_rows, a_cols);
+    // clang-format on
     iter = (iter + 1) % num_elements;
   }
 }
@@ -159,7 +164,7 @@
 BENCHMARK(BM_MatrixTransposeMatrixMultiplyDynamic)
     ->Apply(MatrixTransposeMatrixMultiplySizeArguments);
 
-}  // internal
+}  // namespace internal
 }  // namespace ceres
 
 BENCHMARK_MAIN();
diff --git a/internal/ceres/small_blas_generic.h b/internal/ceres/small_blas_generic.h
index 978c5d5..3f3ea42 100644
--- a/internal/ceres/small_blas_generic.h
+++ b/internal/ceres/small_blas_generic.h
@@ -39,33 +39,33 @@
 namespace internal {
 
 // The following macros are used to share code
-#define CERES_GEMM_OPT_NAIVE_HEADER              \
-  double c0 = 0.0;                               \
-  double c1 = 0.0;                               \
-  double c2 = 0.0;                               \
-  double c3 = 0.0;                               \
-  const double* pa = a;                          \
-  const double* pb = b;                          \
-  const int span = 4;                            \
-  int col_r = col_a & (span - 1);                \
+#define CERES_GEMM_OPT_NAIVE_HEADER \
+  double c0 = 0.0;                  \
+  double c1 = 0.0;                  \
+  double c2 = 0.0;                  \
+  double c3 = 0.0;                  \
+  const double* pa = a;             \
+  const double* pb = b;             \
+  const int span = 4;               \
+  int col_r = col_a & (span - 1);   \
   int col_m = col_a - col_r;
 
-#define CERES_GEMM_OPT_STORE_MAT1X4              \
-  if (kOperation > 0) {                          \
-    *c++ += c0;                                  \
-    *c++ += c1;                                  \
-    *c++ += c2;                                  \
-    *c++ += c3;                                  \
-  } else if (kOperation < 0) {                   \
-    *c++ -= c0;                                  \
-    *c++ -= c1;                                  \
-    *c++ -= c2;                                  \
-    *c++ -= c3;                                  \
-  } else {                                       \
-    *c++ = c0;                                   \
-    *c++ = c1;                                   \
-    *c++ = c2;                                   \
-    *c++ = c3;                                   \
+#define CERES_GEMM_OPT_STORE_MAT1X4 \
+  if (kOperation > 0) {             \
+    *c++ += c0;                     \
+    *c++ += c1;                     \
+    *c++ += c2;                     \
+    *c++ += c3;                     \
+  } else if (kOperation < 0) {      \
+    *c++ -= c0;                     \
+    *c++ -= c1;                     \
+    *c++ -= c2;                     \
+    *c++ -= c3;                     \
+  } else {                          \
+    *c++ = c0;                      \
+    *c++ = c1;                      \
+    *c++ = c2;                      \
+    *c++ = c3;                      \
   }
 
 // Matrix-Matrix Multiplication
@@ -97,14 +97,14 @@
   double av = 0.0;
   int bi = 0;
 
-#define CERES_GEMM_OPT_MMM_MAT1X4_MUL  \
-  av = pa[k];                          \
-  pb = b + bi;                         \
-  c0 += av * *pb++;                    \
-  c1 += av * *pb++;                    \
-  c2 += av * *pb++;                    \
-  c3 += av * *pb++;                    \
-  bi += col_stride_b;                  \
+#define CERES_GEMM_OPT_MMM_MAT1X4_MUL \
+  av = pa[k];                         \
+  pb = b + bi;                        \
+  c0 += av * *pb++;                   \
+  c1 += av * *pb++;                   \
+  c2 += av * *pb++;                   \
+  c3 += av * *pb++;                   \
+  bi += col_stride_b;                 \
   k++;
 
   for (int k = 0; k < col_m;) {
@@ -164,14 +164,14 @@
   int ai = 0;
   int bi = 0;
 
-#define CERES_GEMM_OPT_MTM_MAT1X4_MUL  \
-  av = pa[ai];                         \
-  pb = b + bi;                         \
-  c0 += av * *pb++;                    \
-  c1 += av * *pb++;                    \
-  c2 += av * *pb++;                    \
-  c3 += av * *pb++;                    \
-  ai += col_stride_a;                  \
+#define CERES_GEMM_OPT_MTM_MAT1X4_MUL \
+  av = pa[ai];                        \
+  pb = b + bi;                        \
+  c0 += av * *pb++;                   \
+  c1 += av * *pb++;                   \
+  c2 += av * *pb++;                   \
+  c3 += av * *pb++;                   \
+  ai += col_stride_a;                 \
   bi += col_stride_b;
 
   for (int k = 0; k < col_m; k += span) {
@@ -218,14 +218,16 @@
   CERES_GEMM_OPT_NAIVE_HEADER
   double bv = 0.0;
 
-#define CERES_GEMM_OPT_MVM_MAT4X1_MUL              \
-  bv = *pb;                                        \
-  c0 += *(pa                   ) * bv;             \
-  c1 += *(pa + col_stride_a    ) * bv;             \
-  c2 += *(pa + col_stride_a * 2) * bv;             \
-  c3 += *(pa + col_stride_a * 3) * bv;             \
-  pa++;                                            \
+  // clang-format off
+#define CERES_GEMM_OPT_MVM_MAT4X1_MUL  \
+  bv = *pb;                            \
+  c0 += *(pa                   ) * bv; \
+  c1 += *(pa + col_stride_a    ) * bv; \
+  c2 += *(pa + col_stride_a * 2) * bv; \
+  c3 += *(pa + col_stride_a * 3) * bv; \
+  pa++;                                \
   pb++;
+  // clang-format on
 
   for (int k = 0; k < col_m; k += span) {
     CERES_GEMM_OPT_MVM_MAT4X1_MUL
@@ -281,14 +283,16 @@
   CERES_GEMM_OPT_NAIVE_HEADER
   double bv = 0.0;
 
-#define CERES_GEMM_OPT_MTV_MAT4X1_MUL  \
-  bv = *pb;                            \
-  c0 += *(pa    ) * bv;                \
-  c1 += *(pa + 1) * bv;                \
-  c2 += *(pa + 2) * bv;                \
-  c3 += *(pa + 3) * bv;                \
-  pa += col_stride_a;                  \
+  // clang-format off
+#define CERES_GEMM_OPT_MTV_MAT4X1_MUL \
+  bv = *pb;                           \
+  c0 += *(pa    ) * bv;               \
+  c1 += *(pa + 1) * bv;               \
+  c2 += *(pa + 2) * bv;               \
+  c3 += *(pa + 3) * bv;               \
+  pa += col_stride_a;                 \
   pb++;
+  // clang-format on
 
   for (int k = 0; k < col_m; k += span) {
     CERES_GEMM_OPT_MTV_MAT4X1_MUL
diff --git a/internal/ceres/small_blas_test.cc b/internal/ceres/small_blas_test.cc
index 2914244..6f819c4 100644
--- a/internal/ceres/small_blas_test.cc
+++ b/internal/ceres/small_blas_test.cc
@@ -31,8 +31,9 @@
 #include "ceres/small_blas.h"
 
 #include <limits>
-#include "gtest/gtest.h"
+
 #include "ceres/internal/eigen.h"
+#include "gtest/gtest.h"
 
 namespace ceres {
 namespace internal {
@@ -62,6 +63,7 @@
       Matrix C_plus_ref = C;
       Matrix C_minus_ref = C;
       Matrix C_assign_ref = C;
+      // clang-format off
       for (int start_row_c = 0; start_row_c + kRowA < row_stride_c; ++start_row_c) {
         for (int start_col_c = 0; start_col_c + kColB < col_stride_c; ++start_col_c) {
           C_plus_ref.block(start_row_c, start_col_c, kRowA, kColB) +=
@@ -81,7 +83,6 @@
               << "Cref : \n" << C_plus_ref << "\n"
               << "C: \n" << C_plus;
 
-
           C_minus_ref.block(start_row_c, start_col_c, kRowA, kColB) -=
               A * B;
 
@@ -117,6 +118,7 @@
               << "C: \n" << C_assign;
         }
       }
+      // clang-format on
     }
   }
 }
@@ -133,7 +135,7 @@
   B.setOnes();
 
   for (int row_stride_c = kColA; row_stride_c < 3 * kColA; ++row_stride_c) {
-    for (int col_stride_c = kColB; col_stride_c <  3 * kColB; ++col_stride_c) {
+    for (int col_stride_c = kColB; col_stride_c < 3 * kColB; ++col_stride_c) {
       Matrix C(row_stride_c, col_stride_c);
       C.setOnes();
 
@@ -144,6 +146,7 @@
       Matrix C_plus_ref = C;
       Matrix C_minus_ref = C;
       Matrix C_assign_ref = C;
+      // clang-format off
       for (int start_row_c = 0; start_row_c + kColA < row_stride_c; ++start_row_c) {
         for (int start_col_c = 0; start_col_c + kColB < col_stride_c; ++start_col_c) {
           C_plus_ref.block(start_row_c, start_col_c, kColA, kColB) +=
@@ -198,6 +201,7 @@
               << "C: \n" << C_assign;
         }
       }
+      // clang-format on
     }
   }
 }
@@ -228,6 +232,7 @@
       Matrix C_plus_ref = C;
       Matrix C_minus_ref = C;
       Matrix C_assign_ref = C;
+      // clang-format off
       for (int start_row_c = 0; start_row_c + kRowA < row_stride_c; ++start_row_c) {
         for (int start_col_c = 0; start_col_c + kColB < col_stride_c; ++start_col_c) {
           C_plus_ref.block(start_row_c, start_col_c, kRowA, kColB) +=
@@ -247,7 +252,6 @@
               << "Cref : \n" << C_plus_ref << "\n"
               << "C: \n" << C_plus;
 
-
           C_minus_ref.block(start_row_c, start_col_c, kRowA, kColB) -=
               A * B;
 
@@ -283,6 +287,7 @@
               << "C: \n" << C_assign;
         }
       }
+      // clang-format on
     }
   }
 }
@@ -299,7 +304,7 @@
   B.setOnes();
 
   for (int row_stride_c = kColA; row_stride_c < 3 * kColA; ++row_stride_c) {
-    for (int col_stride_c = kColB; col_stride_c <  3 * kColB; ++col_stride_c) {
+    for (int col_stride_c = kColB; col_stride_c < 3 * kColB; ++col_stride_c) {
       Matrix C(row_stride_c, col_stride_c);
       C.setOnes();
 
@@ -310,6 +315,7 @@
       Matrix C_plus_ref = C;
       Matrix C_minus_ref = C;
       Matrix C_assign_ref = C;
+      // clang-format off
       for (int start_row_c = 0; start_row_c + kColA < row_stride_c; ++start_row_c) {
         for (int start_col_c = 0; start_col_c + kColB < col_stride_c; ++start_col_c) {
           C_plus_ref.block(start_row_c, start_col_c, kColA, kColB) +=
@@ -364,6 +370,7 @@
               << "C: \n" << C_assign;
         }
       }
+      // clang-format on
     }
   }
 }
@@ -388,6 +395,7 @@
       Vector c_minus_ref = c;
       Vector c_assign_ref = c;
 
+      // clang-format off
       c_plus_ref += A * b;
       MatrixVectorMultiply<Eigen::Dynamic, Eigen::Dynamic, 1>(
           A.data(), num_rows_a, num_cols_a,
@@ -417,6 +425,7 @@
           << "c += A * b \n"
           << "c_ref : \n" << c_assign_ref << "\n"
           << "c: \n" << c_assign;
+      // clang-format on
     }
   }
 }
@@ -441,6 +450,7 @@
       Vector c_minus_ref = c;
       Vector c_assign_ref = c;
 
+      // clang-format off
       c_plus_ref += A.transpose() * b;
       MatrixTransposeVectorMultiply<Eigen::Dynamic, Eigen::Dynamic, 1>(
           A.data(), num_rows_a, num_cols_a,
@@ -470,6 +480,7 @@
           << "c += A' * b \n"
           << "c_ref : \n" << c_assign_ref << "\n"
           << "c: \n" << c_assign;
+      // clang-format on
     }
   }
 }
diff --git a/internal/ceres/solver.cc b/internal/ceres/solver.cc
index 861d8d3..b7399e6 100644
--- a/internal/ceres/solver.cc
+++ b/internal/ceres/solver.cc
@@ -56,34 +56,34 @@
 namespace ceres {
 namespace {
 
+using internal::StringAppendF;
+using internal::StringPrintf;
 using std::map;
 using std::string;
 using std::vector;
-using internal::StringAppendF;
-using internal::StringPrintf;
 
-#define OPTION_OP(x, y, OP)                                             \
-  if (!(options.x OP y)) {                                              \
-    std::stringstream ss;                                               \
-    ss << "Invalid configuration. ";                                    \
-    ss << string("Solver::Options::" #x " = ") << options.x << ". ";    \
-    ss << "Violated constraint: ";                                      \
-    ss << string("Solver::Options::" #x " " #OP " "#y);                 \
-    *error = ss.str();                                                  \
-    return false;                                                       \
+#define OPTION_OP(x, y, OP)                                          \
+  if (!(options.x OP y)) {                                           \
+    std::stringstream ss;                                            \
+    ss << "Invalid configuration. ";                                 \
+    ss << string("Solver::Options::" #x " = ") << options.x << ". "; \
+    ss << "Violated constraint: ";                                   \
+    ss << string("Solver::Options::" #x " " #OP " " #y);             \
+    *error = ss.str();                                               \
+    return false;                                                    \
   }
 
-#define OPTION_OP_OPTION(x, y, OP)                                      \
-  if (!(options.x OP options.y)) {                                      \
-    std::stringstream ss;                                               \
-    ss << "Invalid configuration. ";                                    \
-    ss << string("Solver::Options::" #x " = ") << options.x << ". ";    \
-    ss << string("Solver::Options::" #y " = ") << options.y << ". ";    \
-    ss << "Violated constraint: ";                                      \
-    ss << string("Solver::Options::" #x);                               \
-    ss << string(#OP " Solver::Options::" #y ".");                      \
-    *error = ss.str();                                                  \
-    return false;                                                       \
+#define OPTION_OP_OPTION(x, y, OP)                                   \
+  if (!(options.x OP options.y)) {                                   \
+    std::stringstream ss;                                            \
+    ss << "Invalid configuration. ";                                 \
+    ss << string("Solver::Options::" #x " = ") << options.x << ". "; \
+    ss << string("Solver::Options::" #y " = ") << options.y << ". "; \
+    ss << "Violated constraint: ";                                   \
+    ss << string("Solver::Options::" #x);                            \
+    ss << string(#OP " Solver::Options::" #y ".");                   \
+    *error = ss.str();                                               \
+    return false;                                                    \
   }
 
 #define OPTION_GE(x, y) OPTION_OP(x, y, >=);
@@ -135,7 +135,8 @@
   if (options.linear_solver_type == ITERATIVE_SCHUR &&
       options.use_explicit_schur_complement &&
       options.preconditioner_type != SCHUR_JACOBI) {
-    *error =  "use_explicit_schur_complement only supports "
+    *error =
+        "use_explicit_schur_complement only supports "
         "SCHUR_JACOBI as the preconditioner.";
     return false;
   }
@@ -174,7 +175,8 @@
         *error = StringPrintf(
             "Can't use %s with "
             "Solver::Options::sparse_linear_algebra_library_type = %s.",
-            name, sparse_linear_algebra_library_name);
+            name,
+            sparse_linear_algebra_library_name);
         return false;
       } else if (!IsSparseLinearAlgebraLibraryTypeAvailable(
                      options.sparse_linear_algebra_library_type)) {
@@ -182,7 +184,8 @@
             "Can't use %s with "
             "Solver::Options::sparse_linear_algebra_library_type = %s, "
             "because support was not enabled when Ceres Solver was built.",
-            name, sparse_linear_algebra_library_name);
+            name,
+            sparse_linear_algebra_library_name);
         return false;
       }
     }
@@ -191,7 +194,8 @@
   if (options.trust_region_strategy_type == DOGLEG) {
     if (options.linear_solver_type == ITERATIVE_SCHUR ||
         options.linear_solver_type == CGNR) {
-      *error = "DOGLEG only supports exact factorization based linear "
+      *error =
+          "DOGLEG only supports exact factorization based linear "
           "solvers. If you want to use an iterative solver please "
           "use LEVENBERG_MARQUARDT as the trust_region_strategy_type";
       return false;
@@ -207,12 +211,13 @@
 
   if (options.dynamic_sparsity) {
     if (options.linear_solver_type != SPARSE_NORMAL_CHOLESKY) {
-      *error = "Dynamic sparsity is only supported with SPARSE_NORMAL_CHOLESKY.";
+      *error =
+          "Dynamic sparsity is only supported with SPARSE_NORMAL_CHOLESKY.";
       return false;
     }
     if (options.sparse_linear_algebra_library_type == ACCELERATE_SPARSE) {
-      *error = "ACCELERATE_SPARSE is not currently supported with dynamic "
-          "sparsity.";
+      *error =
+          "ACCELERATE_SPARSE is not currently supported with dynamic sparsity.";
       return false;
     }
   }
@@ -250,10 +255,11 @@
        options.line_search_direction_type == ceres::LBFGS) &&
       options.line_search_type != ceres::WOLFE) {
     *error =
-        string("Invalid configuration: Solver::Options::line_search_type = ")
-        + string(LineSearchTypeToString(options.line_search_type))
-        + string(". When using (L)BFGS, "
-                 "Solver::Options::line_search_type must be set to WOLFE.");
+        string("Invalid configuration: Solver::Options::line_search_type = ") +
+        string(LineSearchTypeToString(options.line_search_type)) +
+        string(
+            ". When using (L)BFGS, "
+            "Solver::Options::line_search_type must be set to WOLFE.");
     return false;
   }
 
@@ -298,20 +304,24 @@
 
 void SummarizeGivenProgram(const internal::Program& program,
                            Solver::Summary* summary) {
+  // clang-format off
   summary->num_parameter_blocks     = program.NumParameterBlocks();
   summary->num_parameters           = program.NumParameters();
   summary->num_effective_parameters = program.NumEffectiveParameters();
   summary->num_residual_blocks      = program.NumResidualBlocks();
   summary->num_residuals            = program.NumResiduals();
+  // clang-format on
 }
 
 void SummarizeReducedProgram(const internal::Program& program,
                              Solver::Summary* summary) {
+  // clang-format off
   summary->num_parameter_blocks_reduced     = program.NumParameterBlocks();
   summary->num_parameters_reduced           = program.NumParameters();
   summary->num_effective_parameters_reduced = program.NumEffectiveParameters();
   summary->num_residual_blocks_reduced      = program.NumResidualBlocks();
   summary->num_residuals_reduced            = program.NumResiduals();
+  // clang-format on
 }
 
 void PreSolveSummarize(const Solver::Options& options,
@@ -323,6 +333,7 @@
   internal::OrderingToGroupSizes(options.inner_iteration_ordering.get(),
                                  &(summary->inner_iteration_ordering_given));
 
+  // clang-format off
   summary->dense_linear_algebra_library_type  = options.dense_linear_algebra_library_type;  //  NOLINT
   summary->dogleg_type                        = options.dogleg_type;
   summary->inner_iteration_time_in_seconds    = 0.0;
@@ -344,6 +355,7 @@
   summary->sparse_linear_algebra_library_type = options.sparse_linear_algebra_library_type; //  NOLINT
   summary->trust_region_strategy_type         = options.trust_region_strategy_type;         //  NOLINT
   summary->visibility_clustering_type         = options.visibility_clustering_type;         //  NOLINT
+  // clang-format on
 }
 
 void PostSolveSummarize(const internal::PreprocessedProblem& pp,
@@ -353,10 +365,12 @@
   internal::OrderingToGroupSizes(pp.options.inner_iteration_ordering.get(),
                                  &(summary->inner_iteration_ordering_used));
 
+  // clang-format off
   summary->inner_iterations_used          = pp.inner_iteration_minimizer.get() != NULL;     // NOLINT
   summary->linear_solver_type_used        = pp.linear_solver_options.type;
   summary->num_threads_used               = pp.options.num_threads;
   summary->preconditioner_type_used       = pp.options.preconditioner_type;
+  // clang-format on
 
   internal::SetSummaryFinalCost(summary);
 
@@ -402,14 +416,14 @@
   }
 }
 
-void Minimize(internal::PreprocessedProblem* pp,
-              Solver::Summary* summary) {
-  using internal::Program;
+void Minimize(internal::PreprocessedProblem* pp, Solver::Summary* summary) {
   using internal::Minimizer;
+  using internal::Program;
 
   Program* program = pp->reduced_program.get();
   if (pp->reduced_program->NumParameterBlocks() == 0) {
-    summary->message = "Function tolerance reached. "
+    summary->message =
+        "Function tolerance reached. "
         "No non-constant parameter blocks found.";
     summary->termination_type = CONVERGENCE;
     VLOG_IF(1, pp->options.logging_type != SILENT) << summary->message;
@@ -421,31 +435,29 @@
   const Vector original_reduced_parameters = pp->reduced_parameters;
   std::unique_ptr<Minimizer> minimizer(
       Minimizer::Create(pp->options.minimizer_type));
-  minimizer->Minimize(pp->minimizer_options,
-                      pp->reduced_parameters.data(),
-                      summary);
+  minimizer->Minimize(
+      pp->minimizer_options, pp->reduced_parameters.data(), summary);
 
   program->StateVectorToParameterBlocks(
-      summary->IsSolutionUsable()
-      ? pp->reduced_parameters.data()
-      : original_reduced_parameters.data());
+      summary->IsSolutionUsable() ? pp->reduced_parameters.data()
+                                  : original_reduced_parameters.data());
   program->CopyParameterBlockStateToUserState();
 }
 
 std::string SchurStructureToString(const int row_block_size,
                                    const int e_block_size,
                                    const int f_block_size) {
-  const std::string row =
-      (row_block_size == Eigen::Dynamic)
-      ? "d" : internal::StringPrintf("%d", row_block_size);
+  const std::string row = (row_block_size == Eigen::Dynamic)
+                              ? "d"
+                              : internal::StringPrintf("%d", row_block_size);
 
-  const std::string e =
-      (e_block_size == Eigen::Dynamic)
-      ? "d" : internal::StringPrintf("%d", e_block_size);
+  const std::string e = (e_block_size == Eigen::Dynamic)
+                            ? "d"
+                            : internal::StringPrintf("%d", e_block_size);
 
-  const std::string f =
-      (f_block_size == Eigen::Dynamic)
-      ? "d" : internal::StringPrintf("%d", f_block_size);
+  const std::string f = (f_block_size == Eigen::Dynamic)
+                            ? "d"
+                            : internal::StringPrintf("%d", f_block_size);
 
   return internal::StringPrintf("%s,%s,%s", row.c_str(), e.c_str(), f.c_str());
 }
@@ -503,12 +515,11 @@
   Solver::Options modified_options = options;
   if (options.check_gradients) {
     modified_options.callbacks.push_back(&gradient_checking_callback);
-    gradient_checking_problem.reset(
-        CreateGradientCheckingProblemImpl(
-            problem_impl,
-            options.gradient_check_numeric_derivative_relative_step_size,
-            options.gradient_check_relative_precision,
-            &gradient_checking_callback));
+    gradient_checking_problem.reset(CreateGradientCheckingProblemImpl(
+        problem_impl,
+        options.gradient_check_numeric_derivative_relative_step_size,
+        options.gradient_check_relative_precision,
+        &gradient_checking_callback));
     problem_impl = gradient_checking_problem.get();
     program = problem_impl->mutable_program();
   }
@@ -524,7 +535,8 @@
       Preprocessor::Create(modified_options.minimizer_type));
   PreprocessedProblem pp;
 
-  const bool status = preprocessor->Preprocess(modified_options, problem_impl, &pp);
+  const bool status =
+      preprocessor->Preprocess(modified_options, problem_impl, &pp);
 
   // We check the linear_solver_options.type rather than
   // modified_options.linear_solver_type because, depending on the
@@ -538,17 +550,16 @@
     int e_block_size;
     int f_block_size;
     DetectStructure(*static_cast<internal::BlockSparseMatrix*>(
-                        pp.minimizer_options.jacobian.get())
-                    ->block_structure(),
+                         pp.minimizer_options.jacobian.get())
+                         ->block_structure(),
                     pp.linear_solver_options.elimination_groups[0],
                     &row_block_size,
                     &e_block_size,
                     &f_block_size);
     summary->schur_structure_given =
         SchurStructureToString(row_block_size, e_block_size, f_block_size);
-    internal::GetBestSchurTemplateSpecialization(&row_block_size,
-                                                 &e_block_size,
-                                                 &f_block_size);
+    internal::GetBestSchurTemplateSpecialization(
+        &row_block_size, &e_block_size, &f_block_size);
     summary->schur_structure_used =
         SchurStructureToString(row_block_size, e_block_size, f_block_size);
   }
@@ -595,15 +606,16 @@
 }
 
 string Solver::Summary::BriefReport() const {
-  return StringPrintf("Ceres Solver Report: "
-                      "Iterations: %d, "
-                      "Initial cost: %e, "
-                      "Final cost: %e, "
-                      "Termination: %s",
-                      num_successful_steps + num_unsuccessful_steps,
-                      initial_cost,
-                      final_cost,
-                      TerminationTypeToString(termination_type));
+  return StringPrintf(
+      "Ceres Solver Report: "
+      "Iterations: %d, "
+      "Initial cost: %e, "
+      "Final cost: %e, "
+      "Termination: %s",
+      num_successful_steps + num_unsuccessful_steps,
+      initial_cost,
+      final_cost,
+      TerminationTypeToString(termination_type));
 }
 
 string Solver::Summary::FullReport() const {
@@ -612,28 +624,39 @@
   string report = string("\nSolver Summary (v " + VersionString() + ")\n\n");
 
   StringAppendF(&report, "%45s    %21s\n", "Original", "Reduced");
-  StringAppendF(&report, "Parameter blocks    % 25d% 25d\n",
-                num_parameter_blocks, num_parameter_blocks_reduced);
-  StringAppendF(&report, "Parameters          % 25d% 25d\n",
-                num_parameters, num_parameters_reduced);
+  StringAppendF(&report,
+                "Parameter blocks    % 25d% 25d\n",
+                num_parameter_blocks,
+                num_parameter_blocks_reduced);
+  StringAppendF(&report,
+                "Parameters          % 25d% 25d\n",
+                num_parameters,
+                num_parameters_reduced);
   if (num_effective_parameters_reduced != num_parameters_reduced) {
-    StringAppendF(&report, "Effective parameters% 25d% 25d\n",
-                  num_effective_parameters, num_effective_parameters_reduced);
+    StringAppendF(&report,
+                  "Effective parameters% 25d% 25d\n",
+                  num_effective_parameters,
+                  num_effective_parameters_reduced);
   }
-  StringAppendF(&report, "Residual blocks     % 25d% 25d\n",
-                num_residual_blocks, num_residual_blocks_reduced);
-  StringAppendF(&report, "Residuals           % 25d% 25d\n",
-                num_residuals, num_residuals_reduced);
+  StringAppendF(&report,
+                "Residual blocks     % 25d% 25d\n",
+                num_residual_blocks,
+                num_residual_blocks_reduced);
+  StringAppendF(&report,
+                "Residuals           % 25d% 25d\n",
+                num_residuals,
+                num_residuals_reduced);
 
   if (minimizer_type == TRUST_REGION) {
     // TRUST_SEARCH HEADER
-    StringAppendF(&report, "\nMinimizer                 %19s\n",
-                  "TRUST_REGION");
+    StringAppendF(
+        &report, "\nMinimizer                 %19s\n", "TRUST_REGION");
 
     if (linear_solver_type_used == DENSE_NORMAL_CHOLESKY ||
         linear_solver_type_used == DENSE_SCHUR ||
         linear_solver_type_used == DENSE_QR) {
-      StringAppendF(&report, "\nDense linear algebra library  %15s\n",
+      StringAppendF(&report,
+                    "\nDense linear algebra library  %15s\n",
                     DenseLinearAlgebraLibraryTypeToString(
                         dense_linear_algebra_library_type));
     }
@@ -643,14 +666,15 @@
         (linear_solver_type_used == ITERATIVE_SCHUR &&
          (preconditioner_type_used == CLUSTER_JACOBI ||
           preconditioner_type_used == CLUSTER_TRIDIAGONAL))) {
-      StringAppendF(&report, "\nSparse linear algebra library %15s\n",
+      StringAppendF(&report,
+                    "\nSparse linear algebra library %15s\n",
                     SparseLinearAlgebraLibraryTypeToString(
                         sparse_linear_algebra_library_type));
     }
 
-    StringAppendF(&report, "Trust region strategy     %19s",
-                  TrustRegionStrategyTypeToString(
-                      trust_region_strategy_type));
+    StringAppendF(&report,
+                  "Trust region strategy     %19s",
+                  TrustRegionStrategyTypeToString(trust_region_strategy_type));
     if (trust_region_strategy_type == DOGLEG) {
       if (dogleg_type == TRADITIONAL_DOGLEG) {
         StringAppendF(&report, " (TRADITIONAL)");
@@ -661,28 +685,32 @@
     StringAppendF(&report, "\n");
     StringAppendF(&report, "\n");
 
-    StringAppendF(&report, "%45s    %21s\n", "Given",  "Used");
-    StringAppendF(&report, "Linear solver       %25s%25s\n",
+    StringAppendF(&report, "%45s    %21s\n", "Given", "Used");
+    StringAppendF(&report,
+                  "Linear solver       %25s%25s\n",
                   LinearSolverTypeToString(linear_solver_type_given),
                   LinearSolverTypeToString(linear_solver_type_used));
 
     if (linear_solver_type_given == CGNR ||
         linear_solver_type_given == ITERATIVE_SCHUR) {
-      StringAppendF(&report, "Preconditioner      %25s%25s\n",
+      StringAppendF(&report,
+                    "Preconditioner      %25s%25s\n",
                     PreconditionerTypeToString(preconditioner_type_given),
                     PreconditionerTypeToString(preconditioner_type_used));
     }
 
     if (preconditioner_type_used == CLUSTER_JACOBI ||
         preconditioner_type_used == CLUSTER_TRIDIAGONAL) {
-      StringAppendF(&report, "Visibility clustering%24s%25s\n",
-                    VisibilityClusteringTypeToString(
-                        visibility_clustering_type),
-                    VisibilityClusteringTypeToString(
-                        visibility_clustering_type));
+      StringAppendF(
+          &report,
+          "Visibility clustering%24s%25s\n",
+          VisibilityClusteringTypeToString(visibility_clustering_type),
+          VisibilityClusteringTypeToString(visibility_clustering_type));
     }
-    StringAppendF(&report, "Threads             % 25d% 25d\n",
-                  num_threads_given, num_threads_used);
+    StringAppendF(&report,
+                  "Threads             % 25d% 25d\n",
+                  num_threads_given,
+                  num_threads_used);
 
     string given;
     StringifyOrdering(linear_solver_ordering_given, &given);
@@ -711,68 +739,71 @@
       StringifyOrdering(inner_iteration_ordering_given, &given);
       string used;
       StringifyOrdering(inner_iteration_ordering_used, &used);
-    StringAppendF(&report,
-                  "Inner iteration ordering %20s %24s\n",
-                  given.c_str(),
-                  used.c_str());
+      StringAppendF(&report,
+                    "Inner iteration ordering %20s %24s\n",
+                    given.c_str(),
+                    used.c_str());
     }
   } else {
     // LINE_SEARCH HEADER
     StringAppendF(&report, "\nMinimizer                 %19s\n", "LINE_SEARCH");
 
-
     string line_search_direction_string;
     if (line_search_direction_type == LBFGS) {
       line_search_direction_string = StringPrintf("LBFGS (%d)", max_lbfgs_rank);
     } else if (line_search_direction_type == NONLINEAR_CONJUGATE_GRADIENT) {
-      line_search_direction_string =
-          NonlinearConjugateGradientTypeToString(
-              nonlinear_conjugate_gradient_type);
+      line_search_direction_string = NonlinearConjugateGradientTypeToString(
+          nonlinear_conjugate_gradient_type);
     } else {
       line_search_direction_string =
           LineSearchDirectionTypeToString(line_search_direction_type);
     }
 
-    StringAppendF(&report, "Line search direction     %19s\n",
+    StringAppendF(&report,
+                  "Line search direction     %19s\n",
                   line_search_direction_string.c_str());
 
-    const string line_search_type_string =
-        StringPrintf("%s %s",
-                     LineSearchInterpolationTypeToString(
-                         line_search_interpolation_type),
-                     LineSearchTypeToString(line_search_type));
-    StringAppendF(&report, "Line search type          %19s\n",
+    const string line_search_type_string = StringPrintf(
+        "%s %s",
+        LineSearchInterpolationTypeToString(line_search_interpolation_type),
+        LineSearchTypeToString(line_search_type));
+    StringAppendF(&report,
+                  "Line search type          %19s\n",
                   line_search_type_string.c_str());
     StringAppendF(&report, "\n");
 
-    StringAppendF(&report, "%45s    %21s\n", "Given",  "Used");
-    StringAppendF(&report, "Threads             % 25d% 25d\n",
-                  num_threads_given, num_threads_used);
+    StringAppendF(&report, "%45s    %21s\n", "Given", "Used");
+    StringAppendF(&report,
+                  "Threads             % 25d% 25d\n",
+                  num_threads_given,
+                  num_threads_used);
   }
 
   StringAppendF(&report, "\nCost:\n");
   StringAppendF(&report, "Initial        % 30e\n", initial_cost);
-  if (termination_type != FAILURE &&
-      termination_type != USER_FAILURE) {
+  if (termination_type != FAILURE && termination_type != USER_FAILURE) {
     StringAppendF(&report, "Final          % 30e\n", final_cost);
-    StringAppendF(&report, "Change         % 30e\n",
-                  initial_cost - final_cost);
+    StringAppendF(&report, "Change         % 30e\n", initial_cost - final_cost);
   }
 
-  StringAppendF(&report, "\nMinimizer iterations         % 16d\n",
+  StringAppendF(&report,
+                "\nMinimizer iterations         % 16d\n",
                 num_successful_steps + num_unsuccessful_steps);
 
   // Successful/Unsuccessful steps only matter in the case of the
   // trust region solver. Line search terminates when it encounters
   // the first unsuccessful step.
   if (minimizer_type == TRUST_REGION) {
-    StringAppendF(&report, "Successful steps               % 14d\n",
+    StringAppendF(&report,
+                  "Successful steps               % 14d\n",
                   num_successful_steps);
-    StringAppendF(&report, "Unsuccessful steps             % 14d\n",
+    StringAppendF(&report,
+                  "Unsuccessful steps             % 14d\n",
                   num_unsuccessful_steps);
   }
   if (inner_iterations_used) {
-    StringAppendF(&report, "Steps with inner iterations    % 14d\n",
+    StringAppendF(&report,
+                  "Steps with inner iterations    % 14d\n",
                   num_inner_iteration_steps);
   }
 
@@ -781,53 +812,66 @@
        (minimizer_type == TRUST_REGION && is_constrained));
 
   if (line_search_used) {
-    StringAppendF(&report, "Line search steps              % 14d\n",
+    StringAppendF(&report,
+                  "Line search steps              % 14d\n",
                   num_line_search_steps);
   }
 
   StringAppendF(&report, "\nTime (in seconds):\n");
-  StringAppendF(&report, "Preprocessor        %25.6f\n",
-                preprocessor_time_in_seconds);
+  StringAppendF(
+      &report, "Preprocessor        %25.6f\n", preprocessor_time_in_seconds);
 
-  StringAppendF(&report, "\n  Residual only evaluation %18.6f (%d)\n",
-                residual_evaluation_time_in_seconds, num_residual_evaluations);
+  StringAppendF(&report,
+                "\n  Residual only evaluation %18.6f (%d)\n",
+                residual_evaluation_time_in_seconds,
+                num_residual_evaluations);
   if (line_search_used) {
-    StringAppendF(&report, "    Line search cost evaluation    %10.6f\n",
+    StringAppendF(&report,
+                  "    Line search cost evaluation    %10.6f\n",
                   line_search_cost_evaluation_time_in_seconds);
   }
-  StringAppendF(&report, "  Jacobian & residual evaluation %12.6f (%d)\n",
-                jacobian_evaluation_time_in_seconds, num_jacobian_evaluations);
+  StringAppendF(&report,
+                "  Jacobian & residual evaluation %12.6f (%d)\n",
+                jacobian_evaluation_time_in_seconds,
+                num_jacobian_evaluations);
   if (line_search_used) {
-    StringAppendF(&report, "    Line search gradient evaluation   %6.6f\n",
+    StringAppendF(&report,
+                  "    Line search gradient evaluation   %6.6f\n",
                   line_search_gradient_evaluation_time_in_seconds);
   }
 
   if (minimizer_type == TRUST_REGION) {
-    StringAppendF(&report, "  Linear solver       %23.6f (%d)\n",
-                  linear_solver_time_in_seconds, num_linear_solves);
+    StringAppendF(&report,
+                  "  Linear solver       %23.6f (%d)\n",
+                  linear_solver_time_in_seconds,
+                  num_linear_solves);
   }
 
   if (inner_iterations_used) {
-    StringAppendF(&report, "  Inner iterations    %23.6f\n",
+    StringAppendF(&report,
+                  "  Inner iterations    %23.6f\n",
                   inner_iteration_time_in_seconds);
   }
 
   if (line_search_used) {
-    StringAppendF(&report, "  Line search polynomial minimization  %.6f\n",
+    StringAppendF(&report,
+                  "  Line search polynomial minimization  %.6f\n",
                   line_search_polynomial_minimization_time_in_seconds);
   }
 
-  StringAppendF(&report, "Minimizer           %25.6f\n\n",
-                minimizer_time_in_seconds);
+  StringAppendF(
+      &report, "Minimizer           %25.6f\n\n", minimizer_time_in_seconds);
 
-  StringAppendF(&report, "Postprocessor        %24.6f\n",
-                postprocessor_time_in_seconds);
+  StringAppendF(
+      &report, "Postprocessor        %24.6f\n", postprocessor_time_in_seconds);
 
-  StringAppendF(&report, "Total               %25.6f\n\n",
-                total_time_in_seconds);
+  StringAppendF(
+      &report, "Total               %25.6f\n\n", total_time_in_seconds);
 
-  StringAppendF(&report, "Termination:        %25s (%s)\n",
-                TerminationTypeToString(termination_type), message.c_str());
+  StringAppendF(&report,
+                "Termination:        %25s (%s)\n",
+                TerminationTypeToString(termination_type),
+                message.c_str());
   return report;
 }
 
diff --git a/internal/ceres/solver_utils.cc b/internal/ceres/solver_utils.cc
index 177a928..b6faa2a 100644
--- a/internal/ceres/solver_utils.cc
+++ b/internal/ceres/solver_utils.cc
@@ -28,22 +28,22 @@
 //
 // Author: sameeragarwal@google.com (Sameer Agarwal)
 
+#include "ceres/solver_utils.h"
+
 #include <string>
 
-#include "ceres/internal/config.h"
-
 #include "Eigen/Core"
+#include "ceres/internal/config.h"
 #include "ceres/internal/port.h"
-#include "ceres/solver_utils.h"
 #include "ceres/version.h"
 
 namespace ceres {
 namespace internal {
 
-#define CERES_EIGEN_VERSION                                          \
-  CERES_TO_STRING(EIGEN_WORLD_VERSION) "."                           \
-  CERES_TO_STRING(EIGEN_MAJOR_VERSION) "."                           \
-  CERES_TO_STRING(EIGEN_MINOR_VERSION)
+#define CERES_EIGEN_VERSION                                     \
+  CERES_TO_STRING(EIGEN_WORLD_VERSION)                          \
+  "." CERES_TO_STRING(EIGEN_MAJOR_VERSION) "." CERES_TO_STRING( \
+      EIGEN_MINOR_VERSION)
 
 std::string VersionString() {
   std::string value = std::string(CERES_VERSION_STRING);
diff --git a/internal/ceres/sparse_cholesky.cc b/internal/ceres/sparse_cholesky.cc
index 0639ea9..91cdf67 100644
--- a/internal/ceres/sparse_cholesky.cc
+++ b/internal/ceres/sparse_cholesky.cc
@@ -89,7 +89,8 @@
       if (options.use_mixed_precision_solves) {
         sparse_cholesky = AppleAccelerateCholesky<float>::Create(ordering_type);
       } else {
-        sparse_cholesky = AppleAccelerateCholesky<double>::Create(ordering_type);
+        sparse_cholesky =
+            AppleAccelerateCholesky<double>::Create(ordering_type);
       }
       break;
 #else
diff --git a/internal/ceres/sparse_cholesky.h b/internal/ceres/sparse_cholesky.h
index bbe4237..9be98bd 100644
--- a/internal/ceres/sparse_cholesky.h
+++ b/internal/ceres/sparse_cholesky.h
@@ -32,9 +32,12 @@
 #define CERES_INTERNAL_SPARSE_CHOLESKY_H_
 
 // This include must come before any #ifndef check on Ceres compile options.
+// clang-format off
 #include "ceres/internal/port.h"
+// clang-format on
 
 #include <memory>
+
 #include "ceres/linear_solver.h"
 #include "glog/logging.h"
 
@@ -88,8 +91,8 @@
   // Subsequent calls to Factorize will use that symbolic
   // factorization assuming that the sparsity of the matrix has
   // remained constant.
-  virtual LinearSolverTerminationType Factorize(
-      CompressedRowSparseMatrix* lhs, std::string* message) = 0;
+  virtual LinearSolverTerminationType Factorize(CompressedRowSparseMatrix* lhs,
+                                                std::string* message) = 0;
 
   // Computes the solution to the equation
   //
@@ -106,7 +109,6 @@
       const double* rhs,
       double* solution,
       std::string* message);
-
 };
 
 class IterativeRefiner;
@@ -120,8 +122,8 @@
   virtual ~RefinedSparseCholesky();
 
   virtual CompressedRowSparseMatrix::StorageType StorageType() const;
-  virtual LinearSolverTerminationType Factorize(
-      CompressedRowSparseMatrix* lhs, std::string* message);
+  virtual LinearSolverTerminationType Factorize(CompressedRowSparseMatrix* lhs,
+                                                std::string* message);
   virtual LinearSolverTerminationType Solve(const double* rhs,
                                             double* solution,
                                             std::string* message);
diff --git a/internal/ceres/sparse_matrix.cc b/internal/ceres/sparse_matrix.cc
index f95ff32..32388f5 100644
--- a/internal/ceres/sparse_matrix.cc
+++ b/internal/ceres/sparse_matrix.cc
@@ -33,8 +33,7 @@
 namespace ceres {
 namespace internal {
 
-SparseMatrix::~SparseMatrix() {
-}
+SparseMatrix::~SparseMatrix() {}
 
 }  // namespace internal
 }  // namespace ceres
diff --git a/internal/ceres/sparse_matrix.h b/internal/ceres/sparse_matrix.h
index 074d847..b8a3918 100644
--- a/internal/ceres/sparse_matrix.h
+++ b/internal/ceres/sparse_matrix.h
@@ -34,8 +34,9 @@
 #define CERES_INTERNAL_SPARSE_MATRIX_H_
 
 #include <cstdio>
-#include "ceres/linear_operator.h"
+
 #include "ceres/internal/eigen.h"
+#include "ceres/linear_operator.h"
 #include "ceres/types.h"
 
 namespace ceres {
diff --git a/internal/ceres/sparse_normal_cholesky_solver.h b/internal/ceres/sparse_normal_cholesky_solver.h
index cbff2bd..ef32743 100644
--- a/internal/ceres/sparse_normal_cholesky_solver.h
+++ b/internal/ceres/sparse_normal_cholesky_solver.h
@@ -35,9 +35,12 @@
 #define CERES_INTERNAL_SPARSE_NORMAL_CHOLESKY_SOLVER_H_
 
 // This include must come before any #ifndef check on Ceres compile options.
+// clang-format off
 #include "ceres/internal/port.h"
+// clang-format on
 
 #include <vector>
+
 #include "ceres/linear_solver.h"
 
 namespace ceres {
@@ -58,11 +61,10 @@
   virtual ~SparseNormalCholeskySolver();
 
  private:
-  LinearSolver::Summary SolveImpl(
-      BlockSparseMatrix* A,
-      const double* b,
-      const LinearSolver::PerSolveOptions& options,
-      double* x) final;
+  LinearSolver::Summary SolveImpl(BlockSparseMatrix* A,
+                                  const double* b,
+                                  const LinearSolver::PerSolveOptions& options,
+                                  double* x) final;
 
   const LinearSolver::Options options_;
   Vector rhs_;
diff --git a/internal/ceres/sparse_normal_cholesky_solver_test.cc b/internal/ceres/sparse_normal_cholesky_solver_test.cc
index 10f898b..8acb98e 100644
--- a/internal/ceres/sparse_normal_cholesky_solver_test.cc
+++ b/internal/ceres/sparse_normal_cholesky_solver_test.cc
@@ -29,6 +29,8 @@
 // Author: sameeragarwal@google.com (Sameer Agarwal)
 
 #include <memory>
+
+#include "Eigen/Cholesky"
 #include "ceres/block_sparse_matrix.h"
 #include "ceres/casts.h"
 #include "ceres/context_impl.h"
@@ -39,8 +41,6 @@
 #include "glog/logging.h"
 #include "gtest/gtest.h"
 
-#include "Eigen/Cholesky"
-
 namespace ceres {
 namespace internal {
 
diff --git a/internal/ceres/split.cc b/internal/ceres/split.cc
index 3a09e86..804f441 100644
--- a/internal/ceres/split.cc
+++ b/internal/ceres/split.cc
@@ -75,10 +75,9 @@
 }
 
 template <typename StringType, typename ITR>
-static inline
-void SplitStringToIteratorUsing(const StringType& full,
-                                const char* delim,
-                                ITR& result) {
+static inline void SplitStringToIteratorUsing(const StringType& full,
+                                              const char* delim,
+                                              ITR& result) {
   // Optimize the common case where delim is a single character.
   if (delim[0] != '\0' && delim[1] == '\0') {
     char c = delim[0];
diff --git a/internal/ceres/split.h b/internal/ceres/split.h
index 94b773d..f513023 100644
--- a/internal/ceres/split.h
+++ b/internal/ceres/split.h
@@ -33,6 +33,7 @@
 
 #include <string>
 #include <vector>
+
 #include "ceres/internal/port.h"
 
 namespace ceres {
@@ -41,7 +42,8 @@
 // Split a string using one or more character delimiters, presented as a
 // nul-terminated c string. Append the components to 'result'. If there are
 // consecutive delimiters, this function skips over all of them.
-void SplitStringUsing(const std::string& full, const char* delim,
+void SplitStringUsing(const std::string& full,
+                      const char* delim,
                       std::vector<std::string>* res);
 
 }  // namespace internal
diff --git a/internal/ceres/stl_util.h b/internal/ceres/stl_util.h
index 0595a4c..d3411b7 100644
--- a/internal/ceres/stl_util.h
+++ b/internal/ceres/stl_util.h
@@ -46,8 +46,7 @@
 // advanced, which could result in the hash function trying to deference a
 // stale pointer.
 template <class ForwardIterator>
-void STLDeleteContainerPointers(ForwardIterator begin,
-                                ForwardIterator end) {
+void STLDeleteContainerPointers(ForwardIterator begin, ForwardIterator end) {
   while (begin != end) {
     ForwardIterator temp = begin;
     ++begin;
@@ -80,7 +79,7 @@
 // ElementDeleter (defined below), which ensures that your container's elements
 // are deleted when the ElementDeleter goes out of scope.
 template <class T>
-void STLDeleteElements(T *container) {
+void STLDeleteElements(T* container) {
   if (!container) return;
   STLDeleteContainerPointers(container->begin(), container->end());
   container->clear();
diff --git a/internal/ceres/stringprintf.cc b/internal/ceres/stringprintf.cc
index 7a21f0e..b0e2acc 100644
--- a/internal/ceres/stringprintf.cc
+++ b/internal/ceres/stringprintf.cc
@@ -62,7 +62,7 @@
       return;
     }
 
-#if defined (_MSC_VER)
+#if defined(_MSC_VER)
     // Error or MSVC running out of space.  MSVC 8.0 and higher
     // can be asked about space needed with the special idiom below:
     va_copy(backup_ap, ap);
@@ -78,7 +78,7 @@
 
   // Increase the buffer size to the size requested by vsnprintf,
   // plus one for the closing \0.
-  int length = result+1;
+  int length = result + 1;
   char* buf = new char[length];
 
   // Restore the va_list before we use it again
@@ -93,7 +93,6 @@
   delete[] buf;
 }
 
-
 string StringPrintf(const char* format, ...) {
   va_list ap;
   va_start(ap, format);
diff --git a/internal/ceres/stringprintf.h b/internal/ceres/stringprintf.h
index feeb9c2..98e98cd 100644
--- a/internal/ceres/stringprintf.h
+++ b/internal/ceres/stringprintf.h
@@ -55,9 +55,9 @@
 // have an implicit 'this' argument, the arguments of such methods
 // should be counted from two, not one."
 #define CERES_PRINTF_ATTRIBUTE(string_index, first_to_check) \
-    __attribute__((__format__ (__printf__, string_index, first_to_check)))
+  __attribute__((__format__(__printf__, string_index, first_to_check)))
 #define CERES_SCANF_ATTRIBUTE(string_index, first_to_check) \
-    __attribute__((__format__ (__scanf__, string_index, first_to_check)))
+  __attribute__((__format__(__scanf__, string_index, first_to_check)))
 #else
 #define CERES_PRINTF_ATTRIBUTE(string_index, first_to_check)
 #endif
@@ -68,7 +68,9 @@
     CERES_PRINTF_ATTRIBUTE(1, 2);
 
 // Store result into a supplied string and return it.
-extern const std::string& SStringPrintf(std::string* dst, const char* format, ...)
+extern const std::string& SStringPrintf(std::string* dst,
+                                        const char* format,
+                                        ...)
     // Tell the compiler to do printf format string checking.
     CERES_PRINTF_ATTRIBUTE(2, 3);
 
diff --git a/internal/ceres/subset_preconditioner.cc b/internal/ceres/subset_preconditioner.cc
index 7c24ae9..779a34a 100644
--- a/internal/ceres/subset_preconditioner.cc
+++ b/internal/ceres/subset_preconditioner.cc
@@ -32,6 +32,7 @@
 
 #include <memory>
 #include <string>
+
 #include "ceres/compressed_row_sparse_matrix.h"
 #include "ceres/inner_product_computer.h"
 #include "ceres/linear_solver.h"
@@ -50,8 +51,7 @@
   LinearSolver::Options sparse_cholesky_options;
   sparse_cholesky_options.sparse_linear_algebra_library_type =
       options_.sparse_linear_algebra_library_type;
-  sparse_cholesky_options.use_postordering =
-      options_.use_postordering;
+  sparse_cholesky_options.use_postordering = options_.use_postordering;
   sparse_cholesky_ = SparseCholesky::Create(sparse_cholesky_options);
 }
 
diff --git a/internal/ceres/subset_preconditioner.h b/internal/ceres/subset_preconditioner.h
index 6f3c9ec..f83153c 100644
--- a/internal/ceres/subset_preconditioner.h
+++ b/internal/ceres/subset_preconditioner.h
@@ -32,6 +32,7 @@
 #define CERES_INTERNAL_SUBSET_PRECONDITIONER_H_
 
 #include <memory>
+
 #include "ceres/preconditioner.h"
 
 namespace ceres {
diff --git a/internal/ceres/subset_preconditioner_test.cc b/internal/ceres/subset_preconditioner_test.cc
index ec0ea9a..202110b 100644
--- a/internal/ceres/subset_preconditioner_test.cc
+++ b/internal/ceres/subset_preconditioner_test.cc
@@ -31,6 +31,7 @@
 #include "ceres/subset_preconditioner.h"
 
 #include <memory>
+
 #include "Eigen/Dense"
 #include "Eigen/SparseCore"
 #include "ceres/block_sparse_matrix.h"
diff --git a/internal/ceres/suitesparse.cc b/internal/ceres/suitesparse.cc
index 190d175..0d6f6bd 100644
--- a/internal/ceres/suitesparse.cc
+++ b/internal/ceres/suitesparse.cc
@@ -32,13 +32,12 @@
 #include "ceres/internal/port.h"
 
 #ifndef CERES_NO_SUITESPARSE
-#include "ceres/suitesparse.h"
-
 #include <vector>
 
 #include "ceres/compressed_col_sparse_matrix_utils.h"
 #include "ceres/compressed_row_sparse_matrix.h"
 #include "ceres/linear_solver.h"
+#include "ceres/suitesparse.h"
 #include "ceres/triplet_sparse_matrix.h"
 #include "cholmod.h"
 
@@ -353,7 +352,8 @@
 
 std::unique_ptr<SparseCholesky> SuiteSparseCholesky::Create(
     const OrderingType ordering_type) {
-  return std::unique_ptr<SparseCholesky>(new SuiteSparseCholesky(ordering_type));
+  return std::unique_ptr<SparseCholesky>(
+      new SuiteSparseCholesky(ordering_type));
 }
 
 SuiteSparseCholesky::SuiteSparseCholesky(const OrderingType ordering_type)
diff --git a/internal/ceres/suitesparse.h b/internal/ceres/suitesparse.h
index b77b296..5dcc53f 100644
--- a/internal/ceres/suitesparse.h
+++ b/internal/ceres/suitesparse.h
@@ -41,6 +41,7 @@
 #include <cstring>
 #include <string>
 #include <vector>
+
 #include "SuiteSparseQR.hpp"
 #include "ceres/linear_solver.h"
 #include "ceres/sparse_cholesky.h"
@@ -116,20 +117,23 @@
   // for symmetric scaling which scales both the rows and the columns
   // - diag(scale) * A * diag(scale).
   void Scale(cholmod_dense* scale, int mode, cholmod_sparse* A) {
-     cholmod_scale(scale, mode, A, &cc_);
+    cholmod_scale(scale, mode, A, &cc_);
   }
 
   // Create and return a matrix m = A * A'. Caller owns the
   // result. The matrix A is not modified.
   cholmod_sparse* AATranspose(cholmod_sparse* A) {
-    cholmod_sparse*m =  cholmod_aat(A, NULL, A->nrow, 1, &cc_);
+    cholmod_sparse* m = cholmod_aat(A, NULL, A->nrow, 1, &cc_);
     m->stype = 1;  // Pay attention to the upper triangular part.
     return m;
   }
 
   // y = alpha * A * x + beta * y. Only y is modified.
-  void SparseDenseMultiply(cholmod_sparse* A, double alpha, double beta,
-                           cholmod_dense* x, cholmod_dense* y) {
+  void SparseDenseMultiply(cholmod_sparse* A,
+                           double alpha,
+                           double beta,
+                           cholmod_dense* x,
+                           cholmod_dense* y) {
     double alpha_[2] = {alpha, 0};
     double beta_[2] = {beta, 0};
     cholmod_sdmult(A, 0, alpha_, beta_, x, y, &cc_);
@@ -195,7 +199,9 @@
   // NULL is returned. Caller owns the result.
   //
   // message contains an explanation of the failures if any.
-  cholmod_dense* Solve(cholmod_factor* L, cholmod_dense* b, std::string* message);
+  cholmod_dense* Solve(cholmod_factor* L,
+                       cholmod_dense* b,
+                       std::string* message);
 
   // By virtue of the modeling layer in Ceres being block oriented,
   // all the matrices used by Ceres are also block oriented. When
@@ -229,7 +235,6 @@
   // ordering.
   bool ApproximateMinimumDegreeOrdering(cholmod_sparse* matrix, int* ordering);
 
-
   // Before SuiteSparse version 4.2.0, cholmod_camd was only enabled
   // if SuiteSparse was compiled with Metis support. This makes
   // calling and linking into cholmod_camd problematic even though it
@@ -262,7 +267,7 @@
                                                    int* ordering);
 
   void Free(cholmod_sparse* m) { cholmod_free_sparse(&m, &cc_); }
-  void Free(cholmod_dense* m)  { cholmod_free_dense(&m, &cc_);  }
+  void Free(cholmod_dense* m) { cholmod_free_dense(&m, &cc_); }
   void Free(cholmod_factor* m) { cholmod_free_factor(&m, &cc_); }
 
   void Print(cholmod_sparse* m, const std::string& name) {
@@ -285,17 +290,17 @@
 
 class SuiteSparseCholesky : public SparseCholesky {
  public:
-  static std::unique_ptr<SparseCholesky> Create(
-      OrderingType ordering_type);
+  static std::unique_ptr<SparseCholesky> Create(OrderingType ordering_type);
 
   // SparseCholesky interface.
   virtual ~SuiteSparseCholesky();
   CompressedRowSparseMatrix::StorageType StorageType() const final;
-  LinearSolverTerminationType Factorize(
-      CompressedRowSparseMatrix* lhs, std::string* message) final;
+  LinearSolverTerminationType Factorize(CompressedRowSparseMatrix* lhs,
+                                        std::string* message) final;
   LinearSolverTerminationType Solve(const double* rhs,
                                     double* solution,
                                     std::string* message) final;
+
  private:
   SuiteSparseCholesky(const OrderingType ordering_type);
 
diff --git a/internal/ceres/system_test.cc b/internal/ceres/system_test.cc
index 3f635d0..429973f 100644
--- a/internal/ceres/system_test.cc
+++ b/internal/ceres/system_test.cc
@@ -64,10 +64,10 @@
 class PowellsFunction {
  public:
   PowellsFunction() {
-    x_[0] =  3.0;
+    x_[0] = 3.0;
     x_[1] = -1.0;
-    x_[2] =  0.0;
-    x_[3] =  1.0;
+    x_[2] = 0.0;
+    x_[3] = 1.0;
 
     problem_.AddResidualBlock(
         new AutoDiffCostFunction<F1, 1, 1, 1>(new F1), NULL, &x_[0], &x_[1]);
@@ -94,9 +94,8 @@
   // functions.
   class F1 {
    public:
-    template <typename T> bool operator()(const T* const x1,
-                                          const T* const x2,
-                                          T* residual) const {
+    template <typename T>
+    bool operator()(const T* const x1, const T* const x2, T* residual) const {
       // f1 = x1 + 10 * x2;
       *residual = *x1 + 10.0 * *x2;
       return true;
@@ -105,9 +104,8 @@
 
   class F2 {
    public:
-    template <typename T> bool operator()(const T* const x3,
-                                          const T* const x4,
-                                          T* residual) const {
+    template <typename T>
+    bool operator()(const T* const x3, const T* const x4, T* residual) const {
       // f2 = sqrt(5) (x3 - x4)
       *residual = sqrt(5.0) * (*x3 - *x4);
       return true;
@@ -116,9 +114,8 @@
 
   class F3 {
    public:
-    template <typename T> bool operator()(const T* const x2,
-                                          const T* const x4,
-                                          T* residual) const {
+    template <typename T>
+    bool operator()(const T* const x2, const T* const x4, T* residual) const {
       // f3 = (x2 - 2 x3)^2
       residual[0] = (x2[0] - 2.0 * x4[0]) * (x2[0] - 2.0 * x4[0]);
       return true;
@@ -127,9 +124,8 @@
 
   class F4 {
    public:
-    template <typename T> bool operator()(const T* const x1,
-                                          const T* const x4,
-                                          T* residual) const {
+    template <typename T>
+    bool operator()(const T* const x1, const T* const x4, T* residual) const {
       // f4 = sqrt(10) (x1 - x4)^2
       residual[0] = sqrt(10.0) * (x1[0] - x4[0]) * (x1[0] - x4[0]);
       return true;
diff --git a/internal/ceres/thread_pool.cc b/internal/ceres/thread_pool.cc
index 5a52c9d..821431c 100644
--- a/internal/ceres/thread_pool.cc
+++ b/internal/ceres/thread_pool.cc
@@ -33,11 +33,11 @@
 
 #ifdef CERES_USE_CXX_THREADS
 
-#include "ceres/thread_pool.h"
-
 #include <cmath>
 #include <limits>
 
+#include "ceres/thread_pool.h"
+
 namespace ceres {
 namespace internal {
 namespace {
@@ -53,16 +53,13 @@
   const int num_hardware_threads = std::thread::hardware_concurrency();
   // hardware_concurrency() can return 0 if the value is not well defined or not
   // computable.
-  return num_hardware_threads == 0
-      ? std::numeric_limits<int>::max()
-      : num_hardware_threads;
+  return num_hardware_threads == 0 ? std::numeric_limits<int>::max()
+                                   : num_hardware_threads;
 }
 
-ThreadPool::ThreadPool() { }
+ThreadPool::ThreadPool() {}
 
-ThreadPool::ThreadPool(int num_threads) {
-  Resize(num_threads);
-}
+ThreadPool::ThreadPool(int num_threads) { Resize(num_threads); }
 
 ThreadPool::~ThreadPool() {
   std::lock_guard<std::mutex> lock(thread_pool_mutex_);
@@ -106,11 +103,9 @@
   }
 }
 
-void ThreadPool::Stop() {
-  task_queue_.StopWaiters();
-}
+void ThreadPool::Stop() { task_queue_.StopWaiters(); }
 
 }  // namespace internal
 }  // namespace ceres
 
-#endif // CERES_USE_CXX_THREADS
+#endif  // CERES_USE_CXX_THREADS
diff --git a/internal/ceres/thread_pool_test.cc b/internal/ceres/thread_pool_test.cc
index 48ba9d1..e39f673 100644
--- a/internal/ceres/thread_pool_test.cc
+++ b/internal/ceres/thread_pool_test.cc
@@ -33,16 +33,15 @@
 
 #ifdef CERES_USE_CXX_THREADS
 
-#include "ceres/thread_pool.h"
-
 #include <chrono>
 #include <condition_variable>
 #include <mutex>
 #include <thread>
 
+#include "ceres/thread_pool.h"
+#include "glog/logging.h"
 #include "gmock/gmock.h"
 #include "gtest/gtest.h"
-#include "glog/logging.h"
 
 namespace ceres {
 namespace internal {
@@ -59,14 +58,14 @@
 
     for (int i = 0; i < num_tasks; ++i) {
       thread_pool.AddTask([&]() {
-          std::lock_guard<std::mutex> lock(mutex);
-          ++value;
-          condition.notify_all();
-        });
+        std::lock_guard<std::mutex> lock(mutex);
+        ++value;
+        condition.notify_all();
+      });
     }
 
     std::unique_lock<std::mutex> lock(mutex);
-    condition.wait(lock, [&](){return value == num_tasks;});
+    condition.wait(lock, [&]() { return value == num_tasks; });
   }
 
   EXPECT_EQ(num_tasks, value);
@@ -116,7 +115,7 @@
 
     // Unlock the mutex to unblock all of the threads and wait until all of the
     // tasks are completed.
-    condition.wait(lock, [&](){return value == num_tasks;});
+    condition.wait(lock, [&]() { return value == num_tasks; });
   }
 
   EXPECT_EQ(num_tasks, value);
@@ -197,4 +196,4 @@
 }  // namespace internal
 }  // namespace ceres
 
-#endif // CERES_USE_CXX_THREADS
+#endif  // CERES_USE_CXX_THREADS
diff --git a/internal/ceres/thread_token_provider.cc b/internal/ceres/thread_token_provider.cc
index b04cf84..c7ec67f 100644
--- a/internal/ceres/thread_token_provider.cc
+++ b/internal/ceres/thread_token_provider.cc
@@ -44,7 +44,6 @@
     pool_.Push(i);
   }
 #endif
-
 }
 
 int ThreadTokenProvider::Acquire() {
@@ -61,7 +60,6 @@
   CHECK(pool_.Wait(&thread_id));
   return thread_id;
 #endif
-
 }
 
 void ThreadTokenProvider::Release(int thread_id) {
@@ -69,7 +67,6 @@
 #ifdef CERES_USE_CXX_THREADS
   pool_.Push(thread_id);
 #endif
-
 }
 
 }  // namespace internal
diff --git a/internal/ceres/tiny_solver_autodiff_function_test.cc b/internal/ceres/tiny_solver_autodiff_function_test.cc
index 90033fc..2598188 100644
--- a/internal/ceres/tiny_solver_autodiff_function_test.cc
+++ b/internal/ceres/tiny_solver_autodiff_function_test.cc
@@ -30,27 +30,27 @@
 // Author: mierle@gmail.com (Keir Mierle)
 
 #include "ceres/tiny_solver_autodiff_function.h"
-#include "ceres/tiny_solver.h"
-#include "ceres/tiny_solver_test_util.h"
 
 #include <algorithm>
 #include <cmath>
 #include <limits>
 
+#include "ceres/tiny_solver.h"
+#include "ceres/tiny_solver_test_util.h"
 #include "gtest/gtest.h"
 
 namespace ceres {
 
 struct AutoDiffTestFunctor {
-  template<typename T>
+  template <typename T>
   bool operator()(const T* const parameters, T* residuals) const {
     // Shift the parameters so the solution is not at the origin, to prevent
     // accidentally showing "PASS".
     const T& a = parameters[0] - T(1.0);
     const T& b = parameters[1] - T(2.0);
     const T& c = parameters[2] - T(3.0);
-    residuals[0] = 2.*a + 0.*b + 1.*c;
-    residuals[1] = 0.*a + 4.*b + 6.*c;
+    residuals[0] = 2. * a + 0. * b + 1. * c;
+    residuals[1] = 0. * a + 4. * b + 6. * c;
     return true;
   }
 };
@@ -103,11 +103,9 @@
     NUM_PARAMETERS = 3,
   };
 
-  int NumResiduals() const {
-    return 2;
-  }
+  int NumResiduals() const { return 2; }
 
-  template<typename T>
+  template <typename T>
   bool operator()(const T* parameters, T* residuals) const {
     // Jacobian is not evaluated by cost function, but by autodiff.
     T* jacobian = nullptr;
@@ -115,7 +113,7 @@
   }
 };
 
-template<typename Function, typename Vector>
+template <typename Function, typename Vector>
 void TestHelper(const Function& f, const Vector& x0) {
   Vector x = x0;
   Eigen::Vector2d residuals;
@@ -133,10 +131,8 @@
   Eigen::Vector3d x0(0.76026643, -30.01799744, 0.55192142);
 
   DynamicResidualsFunctor f;
-  using AutoDiffCostFunctor =
-      ceres::TinySolverAutoDiffFunction<DynamicResidualsFunctor,
-                                        Eigen::Dynamic,
-                                        3>;
+  using AutoDiffCostFunctor = ceres::
+      TinySolverAutoDiffFunction<DynamicResidualsFunctor, Eigen::Dynamic, 3>;
   AutoDiffCostFunctor f_autodiff(f);
 
   Eigen::Vector2d residuals;
diff --git a/internal/ceres/tiny_solver_cost_function_adapter_test.cc b/internal/ceres/tiny_solver_cost_function_adapter_test.cc
index 13ad406..6f57193 100644
--- a/internal/ceres/tiny_solver_cost_function_adapter_test.cc
+++ b/internal/ceres/tiny_solver_cost_function_adapter_test.cc
@@ -40,7 +40,7 @@
 
 namespace ceres {
 
-class CostFunction2x3 : public SizedCostFunction<2,3> {
+class CostFunction2x3 : public SizedCostFunction<2, 3> {
   bool Evaluate(double const* const* parameters,
                 double* residuals,
                 double** jacobians) const final {
@@ -48,7 +48,7 @@
     double y = parameters[0][1];
     double z = parameters[0][2];
 
-    residuals[0] = x + 2*y + 4*z;
+    residuals[0] = x + 2 * y + 4 * z;
     residuals[1] = y * z;
 
     if (jacobians && jacobians[0]) {
@@ -65,10 +65,11 @@
   }
 };
 
-template<int kNumResiduals, int kNumParameters>
+template <int kNumResiduals, int kNumParameters>
 void TestHelper() {
   std::unique_ptr<CostFunction> cost_function(new CostFunction2x3);
-  typedef  TinySolverCostFunctionAdapter<kNumResiduals, kNumParameters> CostFunctionAdapter;
+  typedef TinySolverCostFunctionAdapter<kNumResiduals, kNumParameters>
+      CostFunctionAdapter;
   CostFunctionAdapter cfa(*cost_function);
   EXPECT_EQ(CostFunctionAdapter::NUM_RESIDUALS, kNumResiduals);
   EXPECT_EQ(CostFunctionAdapter::NUM_PARAMETERS, kNumParameters);
@@ -80,7 +81,7 @@
   Eigen::Matrix<double, 2, 3, Eigen::ColMajor> actual_jacobian;
   Eigen::Matrix<double, 2, 3, Eigen::RowMajor> expected_jacobian;
 
-  double xyz[3] = { 1.0, -1.0, 2.0};
+  double xyz[3] = {1.0, -1.0, 2.0};
   double* parameters[1] = {xyz};
 
   // Check that residual only evaluation works.
diff --git a/internal/ceres/tiny_solver_test.cc b/internal/ceres/tiny_solver_test.cc
index 2a8cd39..2e70694 100644
--- a/internal/ceres/tiny_solver_test.cc
+++ b/internal/ceres/tiny_solver_test.cc
@@ -30,11 +30,11 @@
 // Author: mierle@gmail.com (Keir Mierle)
 
 #include "ceres/tiny_solver.h"
-#include "ceres/tiny_solver_test_util.h"
 
 #include <algorithm>
 #include <cmath>
 
+#include "ceres/tiny_solver_test_util.h"
 #include "gtest/gtest.h"
 
 namespace ceres {
@@ -66,9 +66,7 @@
     NUM_PARAMETERS = Eigen::Dynamic,
   };
 
-  int NumParameters() const {
-    return 3;
-  }
+  int NumParameters() const { return 3; }
 
   bool operator()(const double* parameters,
                   double* residuals,
@@ -85,9 +83,7 @@
     NUM_PARAMETERS = 3,
   };
 
-  int NumResiduals() const {
-    return 2;
-  }
+  int NumResiduals() const { return 2; }
 
   bool operator()(const double* parameters,
                   double* residuals,
@@ -104,13 +100,9 @@
     NUM_PARAMETERS = Eigen::Dynamic,
   };
 
-  int NumResiduals() const {
-    return 2;
-  }
+  int NumResiduals() const { return 2; }
 
-  int NumParameters() const {
-    return 3;
-  }
+  int NumParameters() const { return 3; }
 
   bool operator()(const double* parameters,
                   double* residuals,
@@ -119,7 +111,7 @@
   }
 };
 
-template<typename Function, typename Vector>
+template <typename Function, typename Vector>
 void TestHelper(const Function& f, const Vector& x0) {
   Vector x = x0;
   Vec2 residuals;
diff --git a/internal/ceres/tiny_solver_test_util.h b/internal/ceres/tiny_solver_test_util.h
index 48fe955..310bb35 100644
--- a/internal/ceres/tiny_solver_test_util.h
+++ b/internal/ceres/tiny_solver_test_util.h
@@ -34,7 +34,7 @@
 
 namespace ceres {
 
-template<typename T>
+template <typename T>
 bool EvaluateResidualsAndJacobians(const T* parameters,
                                    T* residuals,
                                    T* jacobian) {
diff --git a/internal/ceres/triplet_sparse_matrix.cc b/internal/ceres/triplet_sparse_matrix.cc
index 54b588b..5dbf0e7 100644
--- a/internal/ceres/triplet_sparse_matrix.cc
+++ b/internal/ceres/triplet_sparse_matrix.cc
@@ -43,11 +43,7 @@
 namespace internal {
 
 TripletSparseMatrix::TripletSparseMatrix()
-    : num_rows_(0),
-      num_cols_(0),
-      max_num_nonzeros_(0),
-      num_nonzeros_(0) {}
-
+    : num_rows_(0), num_cols_(0), max_num_nonzeros_(0), num_nonzeros_(0) {}
 
 TripletSparseMatrix::~TripletSparseMatrix() {}
 
@@ -111,9 +107,11 @@
 
 bool TripletSparseMatrix::AllTripletsWithinBounds() const {
   for (int i = 0; i < num_nonzeros_; ++i) {
+    // clang-format off
     if ((rows_[i] < 0) || (rows_[i] >= num_rows_) ||
         (cols_[i] < 0) || (cols_[i] >= num_cols_))
       return false;
+    // clang-format on
   }
   return true;
 }
@@ -123,8 +121,7 @@
       << "Reallocation will cause data loss";
 
   // Nothing to do if we have enough space already.
-  if (new_max_num_nonzeros <= max_num_nonzeros_)
-    return;
+  if (new_max_num_nonzeros <= max_num_nonzeros_) return;
 
   int* new_rows = new int[new_max_num_nonzeros];
   int* new_cols = new int[new_max_num_nonzeros];
@@ -168,15 +165,15 @@
   }
 }
 
-void TripletSparseMatrix::RightMultiply(const double* x,  double* y) const {
+void TripletSparseMatrix::RightMultiply(const double* x, double* y) const {
   for (int i = 0; i < num_nonzeros_; ++i) {
-    y[rows_[i]] += values_[i]*x[cols_[i]];
+    y[rows_[i]] += values_[i] * x[cols_[i]];
   }
 }
 
 void TripletSparseMatrix::LeftMultiply(const double* x, double* y) const {
   for (int i = 0; i < num_nonzeros_; ++i) {
-    y[cols_[i]] += values_[i]*x[rows_[i]];
+    y[cols_[i]] += values_[i] * x[rows_[i]];
   }
 }
 
@@ -226,10 +223,9 @@
   num_cols_ = num_cols_ + B.num_cols();
 }
 
-
 void TripletSparseMatrix::Resize(int new_num_rows, int new_num_cols) {
   if ((new_num_rows >= num_rows_) && (new_num_cols >= num_cols_)) {
-    num_rows_  = new_num_rows;
+    num_rows_ = new_num_rows;
     num_cols_ = new_num_cols;
     return;
   }
@@ -245,9 +241,9 @@
   for (int i = 0; i < num_nonzeros_; ++i) {
     if ((r_ptr[i] < num_rows_) && (c_ptr[i] < num_cols_)) {
       if (dropped_terms) {
-        r_ptr[i-dropped_terms] = r_ptr[i];
-        c_ptr[i-dropped_terms] = c_ptr[i];
-        v_ptr[i-dropped_terms] = v_ptr[i];
+        r_ptr[i - dropped_terms] = r_ptr[i];
+        c_ptr[i - dropped_terms] = c_ptr[i];
+        v_ptr[i - dropped_terms] = v_ptr[i];
       }
     } else {
       ++dropped_terms;
diff --git a/internal/ceres/triplet_sparse_matrix.h b/internal/ceres/triplet_sparse_matrix.h
index 2ee0fa9..cbda253 100644
--- a/internal/ceres/triplet_sparse_matrix.h
+++ b/internal/ceres/triplet_sparse_matrix.h
@@ -33,8 +33,9 @@
 
 #include <memory>
 #include <vector>
-#include "ceres/sparse_matrix.h"
+
 #include "ceres/internal/eigen.h"
+#include "ceres/sparse_matrix.h"
 #include "ceres/types.h"
 
 namespace ceres {
@@ -68,11 +69,13 @@
   void ScaleColumns(const double* scale) final;
   void ToDenseMatrix(Matrix* dense_matrix) const final;
   void ToTextFile(FILE* file) const final;
+  // clang-format off
   int num_rows()        const final   { return num_rows_;     }
   int num_cols()        const final   { return num_cols_;     }
   int num_nonzeros()    const final   { return num_nonzeros_; }
   const double* values()  const final { return values_.get(); }
   double* mutable_values() final      { return values_.get(); }
+  // clang-format on
   void set_num_nonzeros(int num_nonzeros);
 
   // Increase max_num_nonzeros and correspondingly increase the size
@@ -94,11 +97,13 @@
   // bounds are dropped and the num_non_zeros changed accordingly.
   void Resize(int new_num_rows, int new_num_cols);
 
+  // clang-format off
   int max_num_nonzeros() const { return max_num_nonzeros_; }
   const int* rows()      const { return rows_.get();       }
   const int* cols()      const { return cols_.get();       }
   int* mutable_rows()          { return rows_.get();       }
   int* mutable_cols()          { return cols_.get();       }
+  // clang-format on
 
   // Returns true if the entries of the matrix obey the row, column,
   // and column size bounds and false otherwise.
diff --git a/internal/ceres/triplet_sparse_matrix_test.cc b/internal/ceres/triplet_sparse_matrix_test.cc
index 881fabc..3af634f 100644
--- a/internal/ceres/triplet_sparse_matrix_test.cc
+++ b/internal/ceres/triplet_sparse_matrix_test.cc
@@ -31,6 +31,7 @@
 #include "ceres/triplet_sparse_matrix.h"
 
 #include <memory>
+
 #include "gtest/gtest.h"
 
 namespace ceres {
@@ -309,15 +310,14 @@
 
 TEST(TripletSparseMatrix, CreateDiagonalMatrix) {
   std::unique_ptr<double[]> values(new double[10]);
-  for (int i = 0; i < 10; ++i)
-    values[i] = i;
+  for (int i = 0; i < 10; ++i) values[i] = i;
 
   std::unique_ptr<TripletSparseMatrix> m(
       TripletSparseMatrix::CreateSparseDiagonalMatrix(values.get(), 10));
   EXPECT_EQ(m->num_rows(), 10);
   EXPECT_EQ(m->num_cols(), 10);
   ASSERT_EQ(m->num_nonzeros(), 10);
-  for (int i = 0; i < 10 ; ++i) {
+  for (int i = 0; i < 10; ++i) {
     EXPECT_EQ(m->rows()[i], i);
     EXPECT_EQ(m->cols()[i], i);
     EXPECT_EQ(m->values()[i], i);
@@ -331,7 +331,7 @@
     for (int j = 0; j < 20; ++j) {
       m.mutable_rows()[nnz] = i;
       m.mutable_cols()[nnz] = j;
-      m.mutable_values()[nnz++] = i+j;
+      m.mutable_values()[nnz++] = i + j;
     }
   }
   m.set_num_nonzeros(nnz);
diff --git a/internal/ceres/trust_region_minimizer.h b/internal/ceres/trust_region_minimizer.h
index b5c4122..e18193c 100644
--- a/internal/ceres/trust_region_minimizer.h
+++ b/internal/ceres/trust_region_minimizer.h
@@ -32,6 +32,7 @@
 #define CERES_INTERNAL_TRUST_REGION_MINIMIZER_H_
 
 #include <memory>
+
 #include "ceres/internal/eigen.h"
 #include "ceres/minimizer.h"
 #include "ceres/solver.h"
diff --git a/internal/ceres/trust_region_minimizer_test.cc b/internal/ceres/trust_region_minimizer_test.cc
index 952f878..8993273 100644
--- a/internal/ceres/trust_region_minimizer_test.cc
+++ b/internal/ceres/trust_region_minimizer_test.cc
@@ -33,7 +33,10 @@
 // implementation, rather than having a test that goes through all the
 // Program and Problem machinery.
 
+#include "ceres/trust_region_minimizer.h"
+
 #include <cmath>
+
 #include "ceres/autodiff_cost_function.h"
 #include "ceres/cost_function.h"
 #include "ceres/dense_qr_solver.h"
@@ -43,7 +46,6 @@
 #include "ceres/linear_solver.h"
 #include "ceres/minimizer.h"
 #include "ceres/problem.h"
-#include "ceres/trust_region_minimizer.h"
 #include "ceres/trust_region_strategy.h"
 #include "gtest/gtest.h"
 
@@ -59,6 +61,7 @@
 template <bool col1, bool col2, bool col3, bool col4>
 class PowellEvaluator2 : public Evaluator {
  public:
+  // clang-format off
   PowellEvaluator2()
       : num_active_cols_(
           (col1 ? 1 : 0) +
@@ -71,6 +74,7 @@
             << col3 << " "
             << col4;
   }
+  // clang-format on
 
   virtual ~PowellEvaluator2() {}
 
@@ -111,7 +115,7 @@
             << "f3=" << f3 << ", "
             << "f4=" << f4 << ".";
 
-    *cost = (f1*f1 + f2*f2 + f3*f3 + f4*f4) / 2.0;
+    *cost = (f1 * f1 + f2 * f2 + f3 * f3 + f4 * f4) / 2.0;
 
     VLOG(1) << "Cost: " << *cost;
 
@@ -132,34 +136,42 @@
 
       int column_index = 0;
       if (col1) {
+        // clang-format off
         jacobian_matrix.col(column_index++) <<
             1.0,
             0.0,
             0.0,
             sqrt(10.0) * 2.0 * (x1 - x4) * (1.0 - x4);
+        // clang-format on
       }
       if (col2) {
+        // clang-format off
         jacobian_matrix.col(column_index++) <<
             10.0,
             0.0,
             2.0*(x2 - 2.0*x3)*(1.0 - 2.0*x3),
             0.0;
+        // clang-format on
       }
 
       if (col3) {
+        // clang-format off
         jacobian_matrix.col(column_index++) <<
             0.0,
             sqrt(5.0),
             2.0*(x2 - 2.0*x3)*(x2 - 2.0),
             0.0;
+        // clang-format on
       }
 
       if (col4) {
+        // clang-format off
         jacobian_matrix.col(column_index++) <<
             0.0,
             -sqrt(5.0),
             0.0,
             sqrt(10.0) * 2.0 * (x1 - x4) * (x1 - 1.0);
+        // clang-format on
       }
       VLOG(1) << "\n" << jacobian_matrix;
     }
@@ -167,7 +179,7 @@
     if (gradient != NULL) {
       int column_index = 0;
       if (col1) {
-        gradient[column_index++] = f1  + f4 * sqrt(10.0) * 2.0 * (x1 - x4);
+        gradient[column_index++] = f1 + f4 * sqrt(10.0) * 2.0 * (x1 - x4);
       }
 
       if (col2) {
@@ -192,16 +204,16 @@
             const double* delta,
             double* state_plus_delta) const final {
     int delta_index = 0;
-    state_plus_delta[0] = (col1  ? state[0] + delta[delta_index++] : state[0]);
-    state_plus_delta[1] = (col2  ? state[1] + delta[delta_index++] : state[1]);
-    state_plus_delta[2] = (col3  ? state[2] + delta[delta_index++] : state[2]);
-    state_plus_delta[3] = (col4  ? state[3] + delta[delta_index++] : state[3]);
+    state_plus_delta[0] = (col1 ? state[0] + delta[delta_index++] : state[0]);
+    state_plus_delta[1] = (col2 ? state[1] + delta[delta_index++] : state[1]);
+    state_plus_delta[2] = (col3 ? state[2] + delta[delta_index++] : state[2]);
+    state_plus_delta[3] = (col4 ? state[3] + delta[delta_index++] : state[3]);
     return true;
   }
 
   int NumEffectiveParameters() const final { return num_active_cols_; }
-  int NumParameters()          const final { return 4; }
-  int NumResiduals()           const final { return 4; }
+  int NumParameters() const final { return 4; }
+  int NumResiduals() const final { return 4; }
 
  private:
   const int num_active_cols_;
@@ -209,13 +221,13 @@
 
 // Templated function to hold a subset of the columns fixed and check
 // if the solver converges to the optimal values or not.
-template<bool col1, bool col2, bool col3, bool col4>
+template <bool col1, bool col2, bool col3, bool col4>
 void IsTrustRegionSolveSuccessful(TrustRegionStrategyType strategy_type) {
   Solver::Options solver_options;
   LinearSolver::Options linear_solver_options;
   DenseQRSolver linear_solver(linear_solver_options);
 
-  double parameters[4] = { 3, -1, 0, 1.0 };
+  double parameters[4] = {3, -1, 0, 1.0};
 
   // If the column is inactive, then set its value to the optimal
   // value.
@@ -263,6 +275,7 @@
   //   IsSolveSuccessful<true, true, false, true>();
 
   const TrustRegionStrategyType kStrategy = LEVENBERG_MARQUARDT;
+  // clang-format off
   IsTrustRegionSolveSuccessful<true,  true,  true,  true >(kStrategy);
   IsTrustRegionSolveSuccessful<true,  true,  true,  false>(kStrategy);
   IsTrustRegionSolveSuccessful<true,  false, true,  true >(kStrategy);
@@ -277,6 +290,7 @@
   IsTrustRegionSolveSuccessful<false, true,  false, false>(kStrategy);
   IsTrustRegionSolveSuccessful<false, false, true,  false>(kStrategy);
   IsTrustRegionSolveSuccessful<false, false, false, true >(kStrategy);
+  // clang-format on
 }
 
 TEST(TrustRegionMinimizer, PowellsSingularFunctionUsingDogleg) {
@@ -287,6 +301,7 @@
   //  IsTrustRegionSolveSuccessful<true,  true,  true,  true >(kStrategy);
 
   const TrustRegionStrategyType kStrategy = DOGLEG;
+  // clang-format off
   IsTrustRegionSolveSuccessful<true,  true,  true,  false>(kStrategy);
   IsTrustRegionSolveSuccessful<true,  false, true,  true >(kStrategy);
   IsTrustRegionSolveSuccessful<false, true,  true,  true >(kStrategy);
@@ -300,9 +315,9 @@
   IsTrustRegionSolveSuccessful<false, true,  false, false>(kStrategy);
   IsTrustRegionSolveSuccessful<false, false, true,  false>(kStrategy);
   IsTrustRegionSolveSuccessful<false, false, false, true >(kStrategy);
+  // clang-format on
 }
 
-
 class CurveCostFunction : public CostFunction {
  public:
   CurveCostFunction(int num_vertices, double target_length)
@@ -352,11 +367,11 @@
         for (int dim = 0; dim < 2; dim++) {
           jacobians[i][dim] = 0.;
 
-          if (norm_u > std::numeric_limits< double >::min()) {
+          if (norm_u > std::numeric_limits<double>::min()) {
             jacobians[i][dim] -= u[dim] / norm_u;
           }
 
-          if (norm_v > std::numeric_limits< double >::min()) {
+          if (norm_v > std::numeric_limits<double>::min()) {
             jacobians[i][dim] += v[dim] / norm_v;
           }
         }
@@ -367,8 +382,8 @@
   }
 
  private:
-  int     num_vertices_;
-  double  target_length_;
+  int num_vertices_;
+  double target_length_;
 };
 
 TEST(TrustRegionMinimizer, JacobiScalingTest) {
@@ -376,7 +391,7 @@
   std::vector<double*> y(N);
   const double pi = 3.1415926535897932384626433;
   for (int i = 0; i < N; i++) {
-    double theta = i * 2. * pi/ static_cast< double >(N);
+    double theta = i * 2. * pi / static_cast<double>(N);
     y[i] = new double[2];
     y[i][0] = cos(theta);
     y[i][1] = sin(theta);
@@ -391,7 +406,7 @@
   EXPECT_LE(summary.final_cost, 1e-10);
 
   for (int i = 0; i < N; i++) {
-    delete []y[i];
+    delete[] y[i];
   }
 }
 
@@ -403,8 +418,7 @@
   }
 
   static CostFunction* Create() {
-    return new AutoDiffCostFunction<ExpCostFunctor, 1, 1>(
-        new ExpCostFunctor);
+    return new AutoDiffCostFunction<ExpCostFunctor, 1, 1>(new ExpCostFunctor);
   }
 };
 
diff --git a/internal/ceres/trust_region_preprocessor_test.cc b/internal/ceres/trust_region_preprocessor_test.cc
index c344812..a2a9523 100644
--- a/internal/ceres/trust_region_preprocessor_test.cc
+++ b/internal/ceres/trust_region_preprocessor_test.cc
@@ -28,6 +28,8 @@
 //
 // Author: sameeragarwal@google.com (Sameer Agarwal)
 
+#include "ceres/trust_region_preprocessor.h"
+
 #include <array>
 #include <map>
 
@@ -35,7 +37,6 @@
 #include "ceres/problem_impl.h"
 #include "ceres/sized_cost_function.h"
 #include "ceres/solver.h"
-#include "ceres/trust_region_preprocessor.h"
 #include "gtest/gtest.h"
 
 namespace ceres {
@@ -147,8 +148,10 @@
     x_ = 1.0;
     y_ = 1.0;
     z_ = 1.0;
-    problem_.AddResidualBlock(new DummyCostFunction<1, 1, 1>, nullptr, &x_, &y_);
-    problem_.AddResidualBlock(new DummyCostFunction<1, 1, 1>, nullptr, &y_, &z_);
+    problem_.AddResidualBlock(
+        new DummyCostFunction<1, 1, 1>, nullptr, &x_, &y_);
+    problem_.AddResidualBlock(
+        new DummyCostFunction<1, 1, 1>, nullptr, &y_, &z_);
   }
 
   void PreprocessForGivenLinearSolverAndVerify(
@@ -322,8 +325,7 @@
   EXPECT_TRUE(pp.inner_iteration_minimizer.get() != nullptr);
 }
 
-TEST_F(LinearSolverAndEvaluatorCreationTest,
-       InvalidInnerIterationsOrdering) {
+TEST_F(LinearSolverAndEvaluatorCreationTest, InvalidInnerIterationsOrdering) {
   Solver::Options options;
   options.use_inner_iterations = true;
   options.inner_iteration_ordering.reset(new ParameterBlockOrdering);
diff --git a/internal/ceres/trust_region_step_evaluator.cc b/internal/ceres/trust_region_step_evaluator.cc
index 33b0c41..19045ae 100644
--- a/internal/ceres/trust_region_step_evaluator.cc
+++ b/internal/ceres/trust_region_step_evaluator.cc
@@ -28,17 +28,18 @@
 //
 // Author: sameeragarwal@google.com (Sameer Agarwal)
 
+#include "ceres/trust_region_step_evaluator.h"
+
 #include <algorithm>
 #include <limits>
-#include "ceres/trust_region_step_evaluator.h"
+
 #include "glog/logging.h"
 
 namespace ceres {
 namespace internal {
 
 TrustRegionStepEvaluator::TrustRegionStepEvaluator(
-    const double initial_cost,
-    const int max_consecutive_nonmonotonic_steps)
+    const double initial_cost, const int max_consecutive_nonmonotonic_steps)
     : max_consecutive_nonmonotonic_steps_(max_consecutive_nonmonotonic_steps),
       minimum_cost_(initial_cost),
       current_cost_(initial_cost),
@@ -46,12 +47,10 @@
       candidate_cost_(initial_cost),
       accumulated_reference_model_cost_change_(0.0),
       accumulated_candidate_model_cost_change_(0.0),
-      num_consecutive_nonmonotonic_steps_(0){
-}
+      num_consecutive_nonmonotonic_steps_(0) {}
 
 double TrustRegionStepEvaluator::StepQuality(
-    const double cost,
-    const double model_cost_change) const {
+    const double cost, const double model_cost_change) const {
   // If the function evaluation for this step was a failure, in which
   // case the TrustRegionMinimizer would have set the cost to
   // std::numeric_limits<double>::max(). In this case, the division by
@@ -68,9 +67,8 @@
   return std::max(relative_decrease, historical_relative_decrease);
 }
 
-void TrustRegionStepEvaluator::StepAccepted(
-    const double cost,
-    const double model_cost_change) {
+void TrustRegionStepEvaluator::StepAccepted(const double cost,
+                                            const double model_cost_change) {
   // Algorithm 10.1.2 from Trust Region Methods by Conn, Gould &
   // Toint.
   //
diff --git a/internal/ceres/trust_region_strategy.cc b/internal/ceres/trust_region_strategy.cc
index 2db6a6c..7e429d5 100644
--- a/internal/ceres/trust_region_strategy.cc
+++ b/internal/ceres/trust_region_strategy.cc
@@ -31,6 +31,7 @@
 //         keir@google.com (Keir Mierle)
 
 #include "ceres/trust_region_strategy.h"
+
 #include "ceres/dogleg_strategy.h"
 #include "ceres/levenberg_marquardt_strategy.h"
 
diff --git a/internal/ceres/trust_region_strategy.h b/internal/ceres/trust_region_strategy.h
index 5751691..48e7347 100644
--- a/internal/ceres/trust_region_strategy.h
+++ b/internal/ceres/trust_region_strategy.h
@@ -32,6 +32,7 @@
 #define CERES_INTERNAL_TRUST_REGION_STRATEGY_H_
 
 #include <string>
+
 #include "ceres/internal/port.h"
 #include "ceres/linear_solver.h"
 
diff --git a/internal/ceres/types.cc b/internal/ceres/types.cc
index 93c4cfc..39bb2d8 100644
--- a/internal/ceres/types.cc
+++ b/internal/ceres/types.cc
@@ -28,18 +28,22 @@
 //
 // Author: sameeragarwal@google.com (Sameer Agarwal)
 
+#include "ceres/types.h"
+
 #include <algorithm>
 #include <cctype>
 #include <string>
-#include "ceres/types.h"
+
 #include "glog/logging.h"
 
 namespace ceres {
 
 using std::string;
 
+// clang-format off
 #define CASESTR(x) case x: return #x
-#define STRENUM(x) if (value == #x) { *type = x; return true;}
+#define STRENUM(x) if (value == #x) { *type = x; return true; }
+// clang-format on
 
 static void UpperCase(string* input) {
   std::transform(input->begin(), input->end(), input->begin(), ::toupper);
@@ -109,8 +113,7 @@
 }
 
 bool StringToSparseLinearAlgebraLibraryType(
-    string value,
-    SparseLinearAlgebraLibraryType* type) {
+    string value, SparseLinearAlgebraLibraryType* type) {
   UpperCase(&value);
   STRENUM(SUITE_SPARSE);
   STRENUM(CX_SPARSE);
@@ -131,8 +134,7 @@
 }
 
 bool StringToDenseLinearAlgebraLibraryType(
-    string value,
-    DenseLinearAlgebraLibraryType* type) {
+    string value, DenseLinearAlgebraLibraryType* type) {
   UpperCase(&value);
   STRENUM(EIGEN);
   STRENUM(LAPACK);
@@ -236,9 +238,8 @@
   }
 }
 
-bool StringToLineSearchInterpolationType(
-    string value,
-    LineSearchInterpolationType* type) {
+bool StringToLineSearchInterpolationType(string value,
+                                         LineSearchInterpolationType* type) {
   UpperCase(&value);
   STRENUM(BISECTION);
   STRENUM(QUADRATIC);
@@ -258,8 +259,7 @@
 }
 
 bool StringToNonlinearConjugateGradientType(
-    string value,
-    NonlinearConjugateGradientType* type) {
+    string value, NonlinearConjugateGradientType* type) {
   UpperCase(&value);
   STRENUM(FLETCHER_REEVES);
   STRENUM(POLAK_RIBIERE);
@@ -267,8 +267,7 @@
   return false;
 }
 
-const char* CovarianceAlgorithmTypeToString(
-    CovarianceAlgorithmType type) {
+const char* CovarianceAlgorithmTypeToString(CovarianceAlgorithmType type) {
   switch (type) {
     CASESTR(DENSE_SVD);
     CASESTR(SPARSE_QR);
@@ -277,17 +276,15 @@
   }
 }
 
-bool StringToCovarianceAlgorithmType(
-    string value,
-    CovarianceAlgorithmType* type) {
+bool StringToCovarianceAlgorithmType(string value,
+                                     CovarianceAlgorithmType* type) {
   UpperCase(&value);
   STRENUM(DENSE_SVD);
   STRENUM(SPARSE_QR);
   return false;
 }
 
-const char* NumericDiffMethodTypeToString(
-    NumericDiffMethodType type) {
+const char* NumericDiffMethodTypeToString(NumericDiffMethodType type) {
   switch (type) {
     CASESTR(CENTRAL);
     CASESTR(FORWARD);
@@ -297,9 +294,7 @@
   }
 }
 
-bool StringToNumericDiffMethodType(
-    string value,
-    NumericDiffMethodType* type) {
+bool StringToNumericDiffMethodType(string value, NumericDiffMethodType* type) {
   UpperCase(&value);
   STRENUM(CENTRAL);
   STRENUM(FORWARD);
@@ -307,8 +302,7 @@
   return false;
 }
 
-const char* VisibilityClusteringTypeToString(
-    VisibilityClusteringType type) {
+const char* VisibilityClusteringTypeToString(VisibilityClusteringType type) {
   switch (type) {
     CASESTR(CANONICAL_VIEWS);
     CASESTR(SINGLE_LINKAGE);
@@ -317,9 +311,8 @@
   }
 }
 
-bool StringToVisibilityClusteringType(
-    string value,
-    VisibilityClusteringType* type) {
+bool StringToVisibilityClusteringType(string value,
+                                      VisibilityClusteringType* type) {
   UpperCase(&value);
   STRENUM(CANONICAL_VIEWS);
   STRENUM(SINGLE_LINKAGE);
@@ -354,9 +347,8 @@
   return false;
 }
 
-
 const char* DumpFormatTypeToString(DumpFormatType type) {
-   switch (type) {
+  switch (type) {
     CASESTR(CONSOLE);
     CASESTR(TEXTFILE);
     default:
@@ -375,9 +367,11 @@
 #undef STRENUM
 
 bool IsSchurType(LinearSolverType type) {
+  // clang-format off
   return ((type == SPARSE_SCHUR) ||
           (type == DENSE_SCHUR)  ||
           (type == ITERATIVE_SCHUR));
+  // clang-format on
 }
 
 bool IsSparseLinearAlgebraLibraryTypeAvailable(
diff --git a/internal/ceres/visibility.cc b/internal/ceres/visibility.cc
index 0981eed..82bf6f1 100644
--- a/internal/ceres/visibility.cc
+++ b/internal/ceres/visibility.cc
@@ -30,13 +30,14 @@
 
 #include "ceres/visibility.h"
 
+#include <algorithm>
 #include <cmath>
 #include <ctime>
-#include <algorithm>
 #include <set>
-#include <vector>
 #include <unordered_map>
 #include <utility>
+#include <vector>
+
 #include "ceres/block_structure.h"
 #include "ceres/graph.h"
 #include "ceres/pair_hash.h"
@@ -138,9 +139,10 @@
     const int count = camera_pair_count.second;
     DCHECK_NE(camera1, camera2);
     // Static cast necessary for Windows.
-    const double weight = static_cast<double>(count) /
-        (sqrt(static_cast<double>(
-                  visibility[camera1].size() * visibility[camera2].size())));
+    const double weight =
+        static_cast<double>(count) /
+        (sqrt(static_cast<double>(visibility[camera1].size() *
+                                  visibility[camera2].size())));
     graph->AddEdge(camera1, camera2, weight);
   }
 
diff --git a/internal/ceres/visibility.h b/internal/ceres/visibility.h
index 115d45f..ed25d53 100644
--- a/internal/ceres/visibility.h
+++ b/internal/ceres/visibility.h
@@ -37,6 +37,7 @@
 
 #include <set>
 #include <vector>
+
 #include "ceres/graph.h"
 
 namespace ceres {
diff --git a/internal/ceres/visibility_based_preconditioner.h b/internal/ceres/visibility_based_preconditioner.h
index aa582d5..0457b9a 100644
--- a/internal/ceres/visibility_based_preconditioner.h
+++ b/internal/ceres/visibility_based_preconditioner.h
@@ -162,8 +162,9 @@
       std::vector<std::set<int>>* cluster_visibility) const;
   WeightedGraph<int>* CreateClusterGraph(
       const std::vector<std::set<int>>& visibility) const;
-  void ForestToClusterPairs(const WeightedGraph<int>& forest,
-                            std::unordered_set<std::pair<int, int>, pair_hash>* cluster_pairs) const;
+  void ForestToClusterPairs(
+      const WeightedGraph<int>& forest,
+      std::unordered_set<std::pair<int, int>, pair_hash>* cluster_pairs) const;
   void ComputeBlockPairsInPreconditioner(const CompressedRowBlockStructure& bs);
   bool IsBlockPairInPreconditioner(int block1, int block2) const;
   bool IsBlockPairOffDiagonal(int block1, int block2) const;
diff --git a/internal/ceres/visibility_based_preconditioner_test.cc b/internal/ceres/visibility_based_preconditioner_test.cc
index a006d98..10aa619 100644
--- a/internal/ceres/visibility_based_preconditioner_test.cc
+++ b/internal/ceres/visibility_based_preconditioner_test.cc
@@ -31,6 +31,7 @@
 #include "ceres/visibility_based_preconditioner.h"
 
 #include <memory>
+
 #include "Eigen/Dense"
 #include "ceres/block_random_access_dense_matrix.h"
 #include "ceres/block_random_access_sparse_matrix.h"
diff --git a/internal/ceres/visibility_test.cc b/internal/ceres/visibility_test.cc
index 5028e01..a199963 100644
--- a/internal/ceres/visibility_test.cc
+++ b/internal/ceres/visibility_test.cc
@@ -46,8 +46,7 @@
 using std::set;
 using std::vector;
 
-class VisibilityTest : public ::testing::Test {
-};
+class VisibilityTest : public ::testing::Test {};
 
 TEST(VisibilityTest, SimpleMatrix) {
   //   A = [1 0 0 0 0 1
@@ -100,14 +99,15 @@
   }
   bs.cols.resize(num_cols);
 
-  vector< set<int>> visibility;
+  vector<set<int>> visibility;
   ComputeVisibility(bs, num_eliminate_blocks, &visibility);
   ASSERT_EQ(visibility.size(), num_cols - num_eliminate_blocks);
   for (int i = 0; i < visibility.size(); ++i) {
     ASSERT_EQ(visibility[i].size(), 1);
   }
 
-  std::unique_ptr<WeightedGraph<int> > graph(CreateSchurComplementGraph(visibility));
+  std::unique_ptr<WeightedGraph<int>> graph(
+      CreateSchurComplementGraph(visibility));
   EXPECT_EQ(graph->vertices().size(), visibility.size());
   for (int i = 0; i < visibility.size(); ++i) {
     EXPECT_EQ(graph->VertexWeight(i), 1.0);
@@ -121,14 +121,12 @@
       }
 
       EXPECT_EQ(graph->EdgeWeight(i, j), edge_weight)
-          << "Edge: " << i << " " << j
-          << " weight: " << graph->EdgeWeight(i, j)
+          << "Edge: " << i << " " << j << " weight: " << graph->EdgeWeight(i, j)
           << " expected weight: " << edge_weight;
     }
   }
 }
 
-
 TEST(VisibilityTest, NoEBlocks) {
   //   A = [1 0 0 0 0 0
   //        1 0 0 0 0 0
@@ -183,8 +181,8 @@
     ASSERT_EQ(visibility[i].size(), 0);
   }
 
-  std::unique_ptr<WeightedGraph<int> > graph(
-					     CreateSchurComplementGraph(visibility));
+  std::unique_ptr<WeightedGraph<int>> graph(
+      CreateSchurComplementGraph(visibility));
   EXPECT_EQ(graph->vertices().size(), visibility.size());
   for (int i = 0; i < visibility.size(); ++i) {
     EXPECT_EQ(graph->VertexWeight(i), 1.0);
@@ -197,8 +195,7 @@
         edge_weight = 1.0;
       }
       EXPECT_EQ(graph->EdgeWeight(i, j), edge_weight)
-          << "Edge: " << i << " " << j
-          << " weight: " << graph->EdgeWeight(i, j)
+          << "Edge: " << i << " " << j << " weight: " << graph->EdgeWeight(i, j)
           << " expected weight: " << edge_weight;
     }
   }
diff --git a/internal/ceres/wall_time.h b/internal/ceres/wall_time.h
index ed0610f..3f465e8 100644
--- a/internal/ceres/wall_time.h
+++ b/internal/ceres/wall_time.h
@@ -33,6 +33,7 @@
 
 #include <map>
 #include <string>
+
 #include "ceres/internal/port.h"
 #include "ceres/stringprintf.h"
 #include "glog/logging.h"
diff --git a/scripts/format_all.sh b/scripts/format_all.sh
new file mode 100755
index 0000000..565ac0c
--- /dev/null
+++ b/scripts/format_all.sh
@@ -0,0 +1,53 @@
+#!/usr/bin/env bash
+
+# Format all source files in the project.
+#
+# Set CLANG_FORMAT_CMD environment variable to specify the executable used (default: `clang-format`).
+
+set -ef  # -e: abort on error; -f: keep the * patterns below from globbing against the CWD
+
+################################################################################
+# Configuration
+
+# folders to search
+FOLDERS="
+    include
+    internal
+    examples
+"
+
+# paths to ignore (shell-glob patterns, matched by find -path)
+EXCLUDE_PATHS="
+    internal/ceres/gtest/*
+    internal/ceres/gmock/*
+    internal/ceres/gmock_gtest_all.cc
+    internal/ceres/gmock_main.cc
+    internal/ceres/generated/*
+    internal/ceres/generated_bundle_adjustment_tests/*
+    internal/ceres/schur_eliminator.cc
+    internal/ceres/partitioned_matrix_view.cc
+    internal/ceres/schur_templates.cc
+"
+
+################################################################################
+# Implementation
+
+# directory of this script and the repository root
+SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
+REPO_DIR="$SCRIPT_DIR/.."
+
+# set default for CLANG_FORMAT_CMD
+CLANG_FORMAT_CMD=${CLANG_FORMAT_CMD:-clang-format}
+echo "Formatting with $CLANG_FORMAT_CMD ($($CLANG_FORMAT_CMD --version))"
+
+# build find arguments to exclude ignored paths; an array keeps each argument intact
+EXCLUDE_ARGS=()
+for p in $EXCLUDE_PATHS; do
+    EXCLUDE_ARGS+=(-not -path "*/$p")
+done
+
+# for each folder, format header and source files (NUL-delimited, safe for any filename)
+for d in $FOLDERS; do
+    d="$REPO_DIR/$d"
+    find "$d" \( -name "*.h" -o -name "*.cc" \) "${EXCLUDE_ARGS[@]}" -print0 | xargs -0 $CLANG_FORMAT_CMD -verbose -i
+done