clang-formatted source

Change-Id: Ia15a55ae053e38d19e1fe7bbc6e452f5d0d1d3ed
diff --git a/internal/ceres/cgnr_solver.cc b/internal/ceres/cgnr_solver.cc
index 015e170..bed4ad0 100644
--- a/internal/ceres/cgnr_solver.cc
+++ b/internal/ceres/cgnr_solver.cc
@@ -239,9 +239,8 @@
     : public CudaPreconditioner {
  public:
   explicit CudaJacobiPreconditioner(ContextImpl* context,
-                                    const CompressedRowSparseMatrix& A) :
-      cpu_preconditioner_(A),
-      m_(context, cpu_preconditioner_.matrix()) {}
+                                    const CompressedRowSparseMatrix& A)
+      : cpu_preconditioner_(A), m_(context, cpu_preconditioner_.matrix()) {}
   ~CudaJacobiPreconditioner() = default;
 
   void Update(const CompressedRowSparseMatrix& A, const double* D) final {
@@ -249,8 +248,7 @@
     m_.CopyValuesFromCpu(cpu_preconditioner_.matrix());
   }
 
-  void RightMultiplyAndAccumulate(
-      const CudaVector& x, CudaVector& y) final {
+  void RightMultiplyAndAccumulate(const CudaVector& x, CudaVector& y) final {
     m_.RightMultiplyAndAccumulate(x, &y);
   }
 
diff --git a/internal/ceres/cgnr_solver.h b/internal/ceres/cgnr_solver.h
index 5111d80..916a806 100644
--- a/internal/ceres/cgnr_solver.h
+++ b/internal/ceres/cgnr_solver.h
@@ -72,8 +72,7 @@
 };
 
 #ifndef CERES_NO_CUDA
-class CudaPreconditioner : public
-    ConjugateGradientsLinearOperator<CudaVector> {
+class CudaPreconditioner : public ConjugateGradientsLinearOperator<CudaVector> {
  public:
   virtual void Update(const CompressedRowSparseMatrix& A, const double* D) = 0;
   virtual ~CudaPreconditioner() = default;
diff --git a/internal/ceres/context_impl.cc b/internal/ceres/context_impl.cc
index a46e760..0ad2abe 100644
--- a/internal/ceres/context_impl.cc
+++ b/internal/ceres/context_impl.cc
@@ -116,35 +116,38 @@
   CHECK_EQ(cudaRuntimeGetVersion(&cuda_version), cudaSuccess);
   cuda_version_major_ = cuda_version / 1000;
   cuda_version_minor_ = (cuda_version % 1000) / 10;
-  CHECK_EQ(cudaGetDeviceProperties(&gpu_device_properties_,
-                                   gpu_device_id_in_use_), cudaSuccess);
+  CHECK_EQ(
+      cudaGetDeviceProperties(&gpu_device_properties_, gpu_device_id_in_use_),
+      cudaSuccess);
   VLOG(3) << "\n" << CudaConfigAsString();
   EventLogger event_logger("InitCuda");
   if (cublasCreate(&cublas_handle_) != CUBLAS_STATUS_SUCCESS) {
-    *message = "CUDA initialization failed because "
-        "cuBLAS::cublasCreate failed.";
+    *message =
+        "CUDA initialization failed because cuBLAS::cublasCreate failed.";
     cublas_handle_ = nullptr;
     return false;
   }
   event_logger.AddEvent("cublasCreate");
   if (cusolverDnCreate(&cusolver_handle_) != CUSOLVER_STATUS_SUCCESS) {
-    *message = "CUDA initialization failed because "
-        "cuSolverDN::cusolverDnCreate failed.";
+    *message =
+        "CUDA initialization failed because cuSolverDN::cusolverDnCreate "
+        "failed.";
     TearDown();
     return false;
   }
   event_logger.AddEvent("cusolverDnCreate");
   if (cusparseCreate(&cusparse_handle_) != CUSPARSE_STATUS_SUCCESS) {
-    *message = "CUDA initialization failed because "
-        "cuSPARSE::cusparseCreate failed.";
+    *message =
+        "CUDA initialization failed because cuSPARSE::cusparseCreate failed.";
     TearDown();
     return false;
   }
   event_logger.AddEvent("cusparseCreate");
   if (cudaStreamCreateWithFlags(&stream_, cudaStreamNonBlocking) !=
       cudaSuccess) {
-    *message = "CUDA initialization failed because "
-        "CUDA::cudaStreamCreateWithFlags failed.";
+    *message =
+        "CUDA initialization failed because CUDA::cudaStreamCreateWithFlags "
+        "failed.";
     TearDown();
     return false;
   }
diff --git a/internal/ceres/context_impl.h b/internal/ceres/context_impl.h
index 9eb59eb..ee0c589 100644
--- a/internal/ceres/context_impl.h
+++ b/internal/ceres/context_impl.h
@@ -92,7 +92,7 @@
   // CUDA device. CudaConfigAsString can only be called after InitCuda has been
   // called.
   std::string CudaConfigAsString() const;
-  // Returns the number of bytes of available global memory on the current CUDA 
+  // Returns the number of bytes of available global memory on the current CUDA
   // device. If it is called before InitCuda, it returns 0.
   size_t GpuMemoryAvailable() const;
 
diff --git a/internal/ceres/cuda_sparse_matrix.cc b/internal/ceres/cuda_sparse_matrix.cc
index a3ed019..1e361d2 100644
--- a/internal/ceres/cuda_sparse_matrix.cc
+++ b/internal/ceres/cuda_sparse_matrix.cc
@@ -110,7 +110,7 @@
   // CUSPARSE_SPMV_ALG_DEFAULT.
 #if CUDART_VERSION >= 11021
   const auto algorithm = CUSPARSE_SPMV_ALG_DEFAULT;
-#else  // CUDART_VERSION >= 11021
+#else   // CUDART_VERSION >= 11021
   const auto algorithm = CUSPARSE_MV_ALG_DEFAULT;
 #endif  // CUDART_VERSION >= 11021
 
diff --git a/internal/ceres/solver.cc b/internal/ceres/solver.cc
index c896e32..07e7216 100644
--- a/internal/ceres/solver.cc
+++ b/internal/ceres/solver.cc
@@ -329,7 +329,7 @@
 
   if (options.preconditioner_type == SUBSET) {
     if (options.sparse_linear_algebra_library_type == CUDA_SPARSE) {
-      *error = 
+      *error =
           "Can't use CGNR with preconditioner_type = SUBSET when "
           "sparse_linear_algebra_library_type = CUDA_SPARSE.";
       return false;