ClangTidy cleanups

1. NULL -> nullptr
2. foo.reset(new Bar) -> foo = std::make_unique<Bar>()
3. Add missing std library includes & std:: prefixes
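
For illustration, a minimal self-contained sketch of changes (1) and (2);
Bar and the value 42 are hypothetical placeholders, not code from this
patch, and std::make_unique requires <memory> and C++14 or newer:

    #include <memory>

    struct Bar {
      explicit Bar(int v) : v(v) {}
      int v;
    };

    int main() {
      Bar* raw = nullptr;               // was: Bar* raw = NULL;
      std::unique_ptr<Bar> foo;
      foo = std::make_unique<Bar>(42);  // was: foo.reset(new Bar(42));
      return (raw == nullptr) ? 0 : foo->v;
    }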

Change-Id: I260b261b484554be681ee5a7398126fdb3b3a789
diff --git a/examples/nist.cc b/examples/nist.cc
index 3944586..a3430ca 100644
--- a/examples/nist.cc
+++ b/examples/nist.cc
@@ -553,7 +553,7 @@
           model, ceres::TAKE_OWNERSHIP, num_observations, options);
     } else {
       LOG(ERROR) << "Invalid numeric diff method specified";
-      return 0;
+      return nullptr;
     }
   } else {
     cost_function =
diff --git a/examples/sampled_function/README.md b/examples/sampled_function/README.md
index ef1af43..5fde415 100644
--- a/examples/sampled_function/README.md
+++ b/examples/sampled_function/README.md
@@ -32,7 +32,7 @@
 
 ```c++
 bool Evaluate(double const* const* parameters, double* residuals, double** jacobians) const {
-  if (jacobians == NULL || jacobians[0] == NULL)
+  if (jacobians == nullptr || jacobians[0] == nullptr)
     interpolator_.Evaluate(parameters[0][0], residuals);
   else
     interpolator_.Evaluate(parameters[0][0], residuals, jacobians[0]);
diff --git a/examples/sampled_function/sampled_function.cc b/examples/sampled_function/sampled_function.cc
index e96018d..ee7e5ed 100644
--- a/examples/sampled_function/sampled_function.cc
+++ b/examples/sampled_function/sampled_function.cc
@@ -82,7 +82,7 @@
   double x = 1.0;
   Problem problem;
   CostFunction* cost_function = InterpolatedCostFunctor::Create(interpolator);
-  problem.AddResidualBlock(cost_function, NULL, &x);
+  problem.AddResidualBlock(cost_function, nullptr, &x);
 
   Solver::Options options;
   options.minimizer_progress_to_stdout = true;
diff --git a/internal/ceres/accelerate_sparse.cc b/internal/ceres/accelerate_sparse.cc
index d2b642b..6cb9382 100644
--- a/internal/ceres/accelerate_sparse.cc
+++ b/internal/ceres/accelerate_sparse.cc
@@ -196,17 +196,17 @@
 LinearSolverTerminationType AppleAccelerateCholesky<Scalar>::Factorize(
     CompressedRowSparseMatrix* lhs, std::string* message) {
   CHECK_EQ(lhs->storage_type(), StorageType());
-  if (lhs == NULL) {
-    *message = "Failure: Input lhs is NULL.";
+  if (lhs == nullptr) {
+    *message = "Failure: Input lhs is nullptr.";
     return LINEAR_SOLVER_FATAL_ERROR;
   }
   typename SparseTypesTrait<Scalar>::SparseMatrix as_lhs =
       as_.CreateSparseMatrixTransposeView(lhs);
 
   if (!symbolic_factor_) {
-    symbolic_factor_.reset(
-        new typename SparseTypesTrait<Scalar>::SymbolicFactorization(
-            as_.AnalyzeCholesky(&as_lhs)));
+    symbolic_factor_ = std::make_unique<
+        typename SparseTypesTrait<Scalar>::SymbolicFactorization>(
+        as_.AnalyzeCholesky(&as_lhs));
     if (symbolic_factor_->status != SparseStatusOK) {
       *message = StringPrintf(
           "Apple Accelerate Failure : Symbolic factorisation failed: %s",
@@ -217,9 +217,9 @@
   }
 
   if (!numeric_factor_) {
-    numeric_factor_.reset(
-        new typename SparseTypesTrait<Scalar>::NumericFactorization(
-            as_.Cholesky(&as_lhs, symbolic_factor_.get())));
+    numeric_factor_ = std::make_unique<
+        typename SparseTypesTrait<Scalar>::NumericFactorization>(
+        as_.Cholesky(&as_lhs, symbolic_factor_.get()));
   } else {
     // Recycle memory from previous numeric factorization.
     as_.Cholesky(&as_lhs, numeric_factor_.get());
@@ -265,7 +265,7 @@
 void AppleAccelerateCholesky<Scalar>::FreeSymbolicFactorization() {
   if (symbolic_factor_) {
     SparseCleanup(*symbolic_factor_);
-    symbolic_factor_.reset();
+    symbolic_factor_ = nullptr;
   }
 }
 
@@ -273,7 +273,7 @@
 void AppleAccelerateCholesky<Scalar>::FreeNumericFactorization() {
   if (numeric_factor_) {
     SparseCleanup(*numeric_factor_);
-    numeric_factor_.reset();
+    numeric_factor_ = nullptr;
   }
 }
 
diff --git a/internal/ceres/array_utils.cc b/internal/ceres/array_utils.cc
index 6bffd84..113d41c 100644
--- a/internal/ceres/array_utils.cc
+++ b/internal/ceres/array_utils.cc
@@ -44,7 +44,7 @@
 using std::string;
 
 bool IsArrayValid(const int size, const double* x) {
-  if (x != NULL) {
+  if (x != nullptr) {
     for (int i = 0; i < size; ++i) {
       if (!std::isfinite(x[i]) || (x[i] == kImpossibleValue)) {
         return false;
@@ -55,7 +55,7 @@
 }
 
 int FindInvalidValue(const int size, const double* x) {
-  if (x == NULL) {
+  if (x == nullptr) {
     return size;
   }
 
@@ -69,7 +69,7 @@
 }
 
 void InvalidateArray(const int size, double* x) {
-  if (x != NULL) {
+  if (x != nullptr) {
     for (int i = 0; i < size; ++i) {
       x[i] = kImpossibleValue;
     }
@@ -78,7 +78,7 @@
 
 void AppendArrayToString(const int size, const double* x, string* result) {
   for (int i = 0; i < size; ++i) {
-    if (x == NULL) {
+    if (x == nullptr) {
       StringAppendF(result, "Not Computed  ");
     } else {
       if (x[i] == kImpossibleValue) {
diff --git a/internal/ceres/array_utils.h b/internal/ceres/array_utils.h
index 68feca5..5264ee6 100644
--- a/internal/ceres/array_utils.h
+++ b/internal/ceres/array_utils.h
@@ -64,7 +64,7 @@
 CERES_EXPORT_INTERNAL int FindInvalidValue(const int size, const double* x);
 
 // Utility routine to print an array of doubles to a string. If the
-// array pointer is NULL, it is treated as an array of zeros.
+// array pointer is nullptr, it is treated as an array of zeros.
 CERES_EXPORT_INTERNAL void AppendArrayToString(const int size,
                                                const double* x,
                                                std::string* result);
diff --git a/internal/ceres/array_utils_test.cc b/internal/ceres/array_utils_test.cc
index 6c0ea84..b011520 100644
--- a/internal/ceres/array_utils_test.cc
+++ b/internal/ceres/array_utils_test.cc
@@ -53,7 +53,7 @@
   EXPECT_FALSE(IsArrayValid(3, x));
   x[1] = std::numeric_limits<double>::signaling_NaN();
   EXPECT_FALSE(IsArrayValid(3, x));
-  EXPECT_TRUE(IsArrayValid(1, NULL));
+  EXPECT_TRUE(IsArrayValid(1, nullptr));
   InvalidateArray(3, x);
   EXPECT_FALSE(IsArrayValid(3, x));
 }
@@ -70,7 +70,7 @@
   EXPECT_EQ(FindInvalidValue(3, x), 1);
   x[1] = std::numeric_limits<double>::signaling_NaN();
   EXPECT_EQ(FindInvalidValue(3, x), 1);
-  EXPECT_EQ(FindInvalidValue(1, NULL), 1);
+  EXPECT_EQ(FindInvalidValue(1, nullptr), 1);
   InvalidateArray(3, x);
   EXPECT_EQ(FindInvalidValue(3, x), 0);
 }
diff --git a/internal/ceres/block_evaluate_preparer.cc b/internal/ceres/block_evaluate_preparer.cc
index 7db96d9..56c97b6 100644
--- a/internal/ceres/block_evaluate_preparer.cc
+++ b/internal/ceres/block_evaluate_preparer.cc
@@ -53,7 +53,7 @@
                                     SparseMatrix* jacobian,
                                     double** jacobians) {
   // If the overall jacobian is not available, use the scratch space.
-  if (jacobian == NULL) {
+  if (jacobian == nullptr) {
     scratch_evaluate_preparer_.Prepare(
         residual_block, residual_block_index, jacobian, jacobians);
     return;
@@ -73,7 +73,7 @@
       // parameters. Instead, bump the pointer for active parameters only.
       jacobian_block_offset++;
     } else {
-      jacobians[j] = NULL;
+      jacobians[j] = nullptr;
     }
   }
 }
diff --git a/internal/ceres/block_jacobi_preconditioner.cc b/internal/ceres/block_jacobi_preconditioner.cc
index 7459310..da8fc94 100644
--- a/internal/ceres/block_jacobi_preconditioner.cc
+++ b/internal/ceres/block_jacobi_preconditioner.cc
@@ -47,7 +47,7 @@
     blocks[i] = bs->cols[i].size;
   }
 
-  m_.reset(new BlockRandomAccessDiagonalMatrix(blocks));
+  m_ = std::make_unique<BlockRandomAccessDiagonalMatrix>(blocks);
 }
 
 BlockJacobiPreconditioner::~BlockJacobiPreconditioner() = default;
@@ -74,7 +74,7 @@
     }
   }
 
-  if (D != NULL) {
+  if (D != nullptr) {
     // Add the diagonal.
     int position = 0;
     for (int i = 0; i < bs->cols.size(); ++i) {
diff --git a/internal/ceres/block_jacobi_preconditioner_test.cc b/internal/ceres/block_jacobi_preconditioner_test.cc
index cc582c6..6d124f1 100644
--- a/internal/ceres/block_jacobi_preconditioner_test.cc
+++ b/internal/ceres/block_jacobi_preconditioner_test.cc
@@ -45,12 +45,12 @@
 class BlockJacobiPreconditionerTest : public ::testing::Test {
  protected:
   void SetUpFromProblemId(int problem_id) {
-    std::unique_ptr<LinearLeastSquaresProblem> problem(
-        CreateLinearLeastSquaresProblemFromId(problem_id));
+    std::unique_ptr<LinearLeastSquaresProblem> problem =
+        CreateLinearLeastSquaresProblemFromId(problem_id);
 
     CHECK(problem != nullptr);
     A.reset(down_cast<BlockSparseMatrix*>(problem->A.release()));
-    D.reset(problem->D.release());
+    D = std::move(problem->D);
 
     Matrix dense_a;
     A->ToDenseMatrix(&dense_a);
diff --git a/internal/ceres/block_jacobian_writer.cc b/internal/ceres/block_jacobian_writer.cc
index e589c34..a5020e6 100644
--- a/internal/ceres/block_jacobian_writer.cc
+++ b/internal/ceres/block_jacobian_writer.cc
@@ -30,6 +30,8 @@
 
 #include "ceres/block_jacobian_writer.h"
 
+#include <algorithm>
+
 #include "ceres/block_evaluate_preparer.h"
 #include "ceres/block_sparse_matrix.h"
 #include "ceres/internal/eigen.h"
@@ -148,7 +150,7 @@
   return preparers;
 }
 
-SparseMatrix* BlockJacobianWriter::CreateJacobian() const {
+std::unique_ptr<SparseMatrix> BlockJacobianWriter::CreateJacobian() const {
   CompressedRowBlockStructure* bs = new CompressedRowBlockStructure;
 
   const vector<ParameterBlock*>& parameter_blocks =
@@ -201,12 +203,10 @@
       }
     }
 
-    sort(row->cells.begin(), row->cells.end(), CellLessThan);
+    std::sort(row->cells.begin(), row->cells.end(), CellLessThan);
   }
 
-  BlockSparseMatrix* jacobian = new BlockSparseMatrix(bs);
-  CHECK(jacobian != nullptr);
-  return jacobian;
+  return std::make_unique<BlockSparseMatrix>(bs);
 }
 
 }  // namespace internal
diff --git a/internal/ceres/block_jacobian_writer.h b/internal/ceres/block_jacobian_writer.h
index 8054d7b..b9c918b 100644
--- a/internal/ceres/block_jacobian_writer.h
+++ b/internal/ceres/block_jacobian_writer.h
@@ -61,7 +61,7 @@
   // This makes the final Write() a nop.
   BlockEvaluatePreparer* CreateEvaluatePreparers(int num_threads);
 
-  SparseMatrix* CreateJacobian() const;
+  std::unique_ptr<SparseMatrix> CreateJacobian() const;
 
   void Write(int /* residual_id */,
              int /* residual_offset */,
diff --git a/internal/ceres/block_random_access_dense_matrix.cc b/internal/ceres/block_random_access_dense_matrix.cc
index f2ac0b8..ed172de 100644
--- a/internal/ceres/block_random_access_dense_matrix.cc
+++ b/internal/ceres/block_random_access_dense_matrix.cc
@@ -48,9 +48,9 @@
     num_rows_ += blocks[i];
   }
 
-  values_.reset(new double[num_rows_ * num_rows_]);
+  values_ = std::make_unique<double[]>(num_rows_ * num_rows_);
 
-  cell_infos_.reset(new CellInfo[num_blocks * num_blocks]);
+  cell_infos_ = std::make_unique<CellInfo[]>(num_blocks * num_blocks);
   for (int i = 0; i < num_blocks * num_blocks; ++i) {
     cell_infos_[i].values = values_.get();
   }
diff --git a/internal/ceres/block_random_access_dense_matrix.h b/internal/ceres/block_random_access_dense_matrix.h
index 21007de..aef252e 100644
--- a/internal/ceres/block_random_access_dense_matrix.h
+++ b/internal/ceres/block_random_access_dense_matrix.h
@@ -46,7 +46,7 @@
 // num_rows x num_cols.
 //
 // This class is NOT thread safe. Since all n^2 cells are stored,
-// GetCell never returns NULL for any (row_block_id, col_block_id)
+// GetCell never returns nullptr for any (row_block_id, col_block_id)
 // pair.
 //
 // ReturnCell is a nop.
diff --git a/internal/ceres/block_random_access_dense_matrix_test.cc b/internal/ceres/block_random_access_dense_matrix_test.cc
index 0736d56..c85c388 100644
--- a/internal/ceres/block_random_access_dense_matrix_test.cc
+++ b/internal/ceres/block_random_access_dense_matrix_test.cc
@@ -58,7 +58,7 @@
       int col_stride;
       CellInfo* cell = m.GetCell(i, j, &row, &col, &row_stride, &col_stride);
 
-      EXPECT_TRUE(cell != NULL);
+      EXPECT_TRUE(cell != nullptr);
       EXPECT_EQ(row, row_idx);
       EXPECT_EQ(col, col_idx);
       EXPECT_EQ(row_stride, 3 + 4 + 5);
diff --git a/internal/ceres/block_random_access_diagonal_matrix.cc b/internal/ceres/block_random_access_diagonal_matrix.cc
index 08f6d7f..1b02155 100644
--- a/internal/ceres/block_random_access_diagonal_matrix.cc
+++ b/internal/ceres/block_random_access_diagonal_matrix.cc
@@ -31,6 +31,7 @@
 #include "ceres/block_random_access_diagonal_matrix.h"
 
 #include <algorithm>
+#include <memory>
 #include <set>
 #include <utility>
 #include <vector>
@@ -66,7 +67,8 @@
   VLOG(1) << "Matrix Size [" << num_cols << "," << num_cols << "] "
           << num_nonzeros;
 
-  tsm_.reset(new TripletSparseMatrix(num_cols, num_cols, num_nonzeros));
+  tsm_ =
+      std::make_unique<TripletSparseMatrix>(num_cols, num_cols, num_nonzeros);
   tsm_->set_num_nonzeros(num_nonzeros);
   int* rows = tsm_->mutable_rows();
   int* cols = tsm_->mutable_cols();
@@ -99,7 +101,7 @@
                                                    int* row_stride,
                                                    int* col_stride) {
   if (row_block_id != col_block_id) {
-    return NULL;
+    return nullptr;
   }
   const int stride = blocks_[row_block_id];
 
diff --git a/internal/ceres/block_random_access_diagonal_matrix_test.cc b/internal/ceres/block_random_access_diagonal_matrix_test.cc
index afd95ee..42a309f 100644
--- a/internal/ceres/block_random_access_diagonal_matrix_test.cc
+++ b/internal/ceres/block_random_access_diagonal_matrix_test.cc
@@ -52,7 +52,7 @@
     const int num_rows = 3 + 4 + 5;
     num_nonzeros_ = 3 * 3 + 4 * 4 + 5 * 5;
 
-    m_.reset(new BlockRandomAccessDiagonalMatrix(blocks));
+    m_ = std::make_unique<BlockRandomAccessDiagonalMatrix>(blocks);
 
     EXPECT_EQ(m_->num_rows(), num_rows);
     EXPECT_EQ(m_->num_cols(), num_rows);
@@ -71,11 +71,11 @@
             row_block_id, col_block_id, &row, &col, &row_stride, &col_stride);
         // Off diagonal entries are not present.
         if (i != j) {
-          EXPECT_TRUE(cell == NULL);
+          EXPECT_TRUE(cell == nullptr);
           continue;
         }
 
-        EXPECT_TRUE(cell != NULL);
+        EXPECT_TRUE(cell != nullptr);
         EXPECT_EQ(row, 0);
         EXPECT_EQ(col, 0);
         EXPECT_EQ(row_stride, blocks[row_block_id]);
diff --git a/internal/ceres/block_random_access_matrix.h b/internal/ceres/block_random_access_matrix.h
index f190622..7f01763 100644
--- a/internal/ceres/block_random_access_matrix.h
+++ b/internal/ceres/block_random_access_matrix.h
@@ -62,7 +62,7 @@
 //
 // There is no requirement that all cells be present, i.e. the matrix
 // itself can be block sparse. When a cell is not present, the GetCell
-// method will return a NULL pointer.
+// method will return a nullptr.
 //
 // There is no requirement about how the cells are stored beyond that
 // form a dense submatrix of a larger dense matrix. Like everywhere
@@ -77,7 +77,7 @@
 //                              &row, &col,
 //                              &row_stride, &col_stride);
 //
-//  if (cell != NULL) {
+//  if (cell != nullptr) {
 //     MatrixRef m(cell->values, row_stride, col_stride);
 //     std::lock_guard<std::mutex> l(&cell->m);
 //     m.block(row, col, row_block_size, col_block_size) = ...
@@ -99,7 +99,7 @@
 
   // If the cell (row_block_id, col_block_id) is present, then return
   // a CellInfo with a pointer to the dense matrix containing it,
-  // otherwise return NULL. The dense matrix containing this cell has
+  // otherwise return nullptr. The dense matrix containing this cell has
   // size row_stride, col_stride and the cell is located at position
   // (row, col) within this matrix.
   //
diff --git a/internal/ceres/block_random_access_sparse_matrix.cc b/internal/ceres/block_random_access_sparse_matrix.cc
index c28b7ce..1b2f5e7 100644
--- a/internal/ceres/block_random_access_sparse_matrix.cc
+++ b/internal/ceres/block_random_access_sparse_matrix.cc
@@ -76,7 +76,8 @@
   VLOG(1) << "Matrix Size [" << num_cols << "," << num_cols << "] "
           << num_nonzeros;
 
-  tsm_.reset(new TripletSparseMatrix(num_cols, num_cols, num_nonzeros));
+  tsm_ =
+      std::make_unique<TripletSparseMatrix>(num_cols, num_cols, num_nonzeros);
   tsm_->set_num_nonzeros(num_nonzeros);
   int* rows = tsm_->mutable_rows();
   int* cols = tsm_->mutable_cols();
@@ -129,7 +130,7 @@
   const LayoutType::iterator it =
       layout_.find(IntPairToLong(row_block_id, col_block_id));
   if (it == layout_.end()) {
-    return NULL;
+    return nullptr;
   }
 
   // Each cell is stored contiguously as its own little dense matrix.
diff --git a/internal/ceres/block_random_access_sparse_matrix_test.cc b/internal/ceres/block_random_access_sparse_matrix_test.cc
index 557b678..7224b65 100644
--- a/internal/ceres/block_random_access_sparse_matrix_test.cc
+++ b/internal/ceres/block_random_access_sparse_matrix_test.cc
@@ -80,7 +80,7 @@
     int col_stride;
     CellInfo* cell = m.GetCell(
         row_block_id, col_block_id, &row, &col, &row_stride, &col_stride);
-    EXPECT_TRUE(cell != NULL);
+    EXPECT_TRUE(cell != nullptr);
     EXPECT_EQ(row, 0);
     EXPECT_EQ(col, 0);
     EXPECT_EQ(row_stride, blocks[row_block_id]);
@@ -142,7 +142,7 @@
     blocks.push_back(1);
     set<pair<int, int>> block_pairs;
     block_pairs.insert(make_pair(0, 0));
-    m_.reset(new BlockRandomAccessSparseMatrix(blocks, block_pairs));
+    m_ = std::make_unique<BlockRandomAccessSparseMatrix>(blocks, block_pairs);
   }
 
   void CheckIntPairToLong(int a, int b) {
diff --git a/internal/ceres/block_sparse_matrix.cc b/internal/ceres/block_sparse_matrix.cc
index 4db409d..e06bd2f 100644
--- a/internal/ceres/block_sparse_matrix.cc
+++ b/internal/ceres/block_sparse_matrix.cc
@@ -80,7 +80,7 @@
   CHECK_GE(num_nonzeros_, 0);
   VLOG(2) << "Allocating values array with " << num_nonzeros_ * sizeof(double)
           << " bytes.";  // NOLINT
-  values_.reset(new double[num_nonzeros_]);
+  values_ = std::make_unique<double[]>(num_nonzeros_);
   max_num_nonzeros_ = num_nonzeros_;
   CHECK(values_ != nullptr);
 }
@@ -248,7 +248,7 @@
   }
 }
 
-BlockSparseMatrix* BlockSparseMatrix::CreateDiagonalMatrix(
+std::unique_ptr<BlockSparseMatrix> BlockSparseMatrix::CreateDiagonalMatrix(
     const double* diagonal, const std::vector<Block>& column_blocks) {
   // Create the block structure for the diagonal matrix.
   CompressedRowBlockStructure* bs = new CompressedRowBlockStructure();
@@ -265,7 +265,7 @@
   }
 
   // Create the BlockSparseMatrix with the given block structure.
-  BlockSparseMatrix* matrix = new BlockSparseMatrix(bs);
+  auto matrix = std::make_unique<BlockSparseMatrix>(bs);
   matrix->SetZero();
 
   // Fill the values array of the block sparse matrix.
@@ -308,9 +308,10 @@
   }
 
   if (num_nonzeros_ > max_num_nonzeros_) {
-    double* new_values = new double[num_nonzeros_];
-    std::copy(values_.get(), values_.get() + old_num_nonzeros, new_values);
-    values_.reset(new_values);
+    std::unique_ptr<double[]> new_values =
+        std::make_unique<double[]>(num_nonzeros_);
+    std::copy_n(values_.get(), old_num_nonzeros, new_values.get());
+    values_ = std::move(new_values);
     max_num_nonzeros_ = num_nonzeros_;
   }
 
@@ -337,7 +338,7 @@
   block_structure_->rows.resize(num_row_blocks - delta_row_blocks);
 }
 
-BlockSparseMatrix* BlockSparseMatrix::CreateRandomMatrix(
+std::unique_ptr<BlockSparseMatrix> BlockSparseMatrix::CreateRandomMatrix(
     const BlockSparseMatrix::RandomMatrixOptions& options) {
   CHECK_GT(options.num_row_blocks, 0);
   CHECK_GT(options.min_row_block_size, 0);
@@ -395,7 +396,7 @@
     }
   }
 
-  BlockSparseMatrix* matrix = new BlockSparseMatrix(bs);
+  auto matrix = std::make_unique<BlockSparseMatrix>(bs);
   double* values = matrix->mutable_values();
   for (int i = 0; i < matrix->num_nonzeros(); ++i) {
     values[i] = RandNormal();
diff --git a/internal/ceres/block_sparse_matrix.h b/internal/ceres/block_sparse_matrix.h
index ecf6263..0707546 100644
--- a/internal/ceres/block_sparse_matrix.h
+++ b/internal/ceres/block_sparse_matrix.h
@@ -97,7 +97,7 @@
   // Delete the bottom delta_rows_blocks.
   void DeleteRowBlocks(int delta_row_blocks);
 
-  static BlockSparseMatrix* CreateDiagonalMatrix(
+  static std::unique_ptr<BlockSparseMatrix> CreateDiagonalMatrix(
       const double* diagonal, const std::vector<Block>& column_blocks);
 
   struct RandomMatrixOptions {
@@ -122,9 +122,7 @@
   // Create a random BlockSparseMatrix whose entries are normally
   // distributed and whose structure is determined by
   // RandomMatrixOptions.
-  //
-  // Caller owns the result.
-  static BlockSparseMatrix* CreateRandomMatrix(
+  static std::unique_ptr<BlockSparseMatrix> CreateRandomMatrix(
       const RandomMatrixOptions& options);
 
  private:
diff --git a/internal/ceres/block_sparse_matrix_test.cc b/internal/ceres/block_sparse_matrix_test.cc
index 02d3fb1..747cdef 100644
--- a/internal/ceres/block_sparse_matrix_test.cc
+++ b/internal/ceres/block_sparse_matrix_test.cc
@@ -46,12 +46,12 @@
 class BlockSparseMatrixTest : public ::testing::Test {
  protected:
   void SetUp() final {
-    std::unique_ptr<LinearLeastSquaresProblem> problem(
-        CreateLinearLeastSquaresProblemFromId(2));
+    std::unique_ptr<LinearLeastSquaresProblem> problem =
+        CreateLinearLeastSquaresProblemFromId(2);
     CHECK(problem != nullptr);
     A_.reset(down_cast<BlockSparseMatrix*>(problem->A.release()));
 
-    problem.reset(CreateLinearLeastSquaresProblemFromId(1));
+    problem = CreateLinearLeastSquaresProblemFromId(1);
     CHECK(problem != nullptr);
     B_.reset(down_cast<TripletSparseMatrix*>(problem->A.release()));
 
@@ -110,15 +110,15 @@
 }
 
 TEST_F(BlockSparseMatrixTest, AppendRows) {
-  std::unique_ptr<LinearLeastSquaresProblem> problem(
-      CreateLinearLeastSquaresProblemFromId(2));
+  std::unique_ptr<LinearLeastSquaresProblem> problem =
+      CreateLinearLeastSquaresProblemFromId(2);
   std::unique_ptr<BlockSparseMatrix> m(
       down_cast<BlockSparseMatrix*>(problem->A.release()));
   A_->AppendRows(*m);
   EXPECT_EQ(A_->num_rows(), 2 * m->num_rows());
   EXPECT_EQ(A_->num_cols(), m->num_cols());
 
-  problem.reset(CreateLinearLeastSquaresProblemFromId(1));
+  problem = CreateLinearLeastSquaresProblemFromId(1);
   std::unique_ptr<TripletSparseMatrix> m2(
       down_cast<TripletSparseMatrix*>(problem->A.release()));
   B_->AppendRows(*m2);
diff --git a/internal/ceres/bundle_adjustment_test_util.h b/internal/ceres/bundle_adjustment_test_util.h
index 074931f..8af935d 100644
--- a/internal/ceres/bundle_adjustment_test_util.h
+++ b/internal/ceres/bundle_adjustment_test_util.h
@@ -149,10 +149,11 @@
       // point_index()[i] respectively.
       double* camera = cameras + 9 * camera_index_[i];
       double* point = points + 3 * point_index()[i];
-      problem_.AddResidualBlock(cost_function, NULL, camera, point);
+      problem_.AddResidualBlock(cost_function, nullptr, camera, point);
     }
 
-    options_.linear_solver_ordering.reset(new ParameterBlockOrdering);
+    options_.linear_solver_ordering =
+        std::make_shared<ParameterBlockOrdering>();
 
     // The points come before the cameras.
     for (int i = 0; i < num_points_; ++i) {
diff --git a/internal/ceres/c_api.cc b/internal/ceres/c_api.cc
index a604e56..ddb8a98 100644
--- a/internal/ceres/c_api.cc
+++ b/internal/ceres/c_api.cc
@@ -148,24 +148,25 @@
     double** parameters) {
   Problem* ceres_problem = reinterpret_cast<Problem*>(problem);
 
-  ceres::CostFunction* callback_cost_function =
-      new CallbackCostFunction(cost_function,
-                               cost_function_data,
-                               num_residuals,
-                               num_parameter_blocks,
-                               parameter_block_sizes);
+  auto callback_cost_function =
+      std::make_unique<CallbackCostFunction>(cost_function,
+                                             cost_function_data,
+                                             num_residuals,
+                                             num_parameter_blocks,
+                                             parameter_block_sizes);
 
-  ceres::LossFunction* callback_loss_function = NULL;
-  if (loss_function != NULL) {
-    callback_loss_function =
-        new CallbackLossFunction(loss_function, loss_function_data);
+  std::unique_ptr<ceres::LossFunction> callback_loss_function;
+  if (loss_function != nullptr) {
+    callback_loss_function = std::make_unique<CallbackLossFunction>(
+        loss_function, loss_function_data);
   }
 
   std::vector<double*> parameter_blocks(parameters,
                                         parameters + num_parameter_blocks);
   return reinterpret_cast<ceres_residual_block_id_t*>(
-      ceres_problem->AddResidualBlock(
-          callback_cost_function, callback_loss_function, parameter_blocks));
+      ceres_problem->AddResidualBlock(callback_cost_function.release(),
+                                      callback_loss_function.release(),
+                                      parameter_blocks));
 }
 
 void ceres_solve(ceres_problem_t* c_problem) {
diff --git a/internal/ceres/c_api_test.cc b/internal/ceres/c_api_test.cc
index 043f6ab..2473116 100644
--- a/internal/ceres/c_api_test.cc
+++ b/internal/ceres/c_api_test.cc
@@ -121,13 +121,13 @@
   double c = parameters[1][0];
 
   residuals[0] = y - exp(m * x + c);
-  if (jacobians == NULL) {
+  if (jacobians == nullptr) {
     return 1;
   }
-  if (jacobians[0] != NULL) {
+  if (jacobians[0] != nullptr) {
     jacobians[0][0] = -x * exp(m * x + c);  // dr/dm
   }
-  if (jacobians[1] != NULL) {
+  if (jacobians[1] != nullptr) {
     jacobians[1][0] = -exp(m * x + c);  // dr/dc
   }
   return 1;
@@ -148,8 +148,8 @@
         problem,
         exponential_residual,  // Cost function
         &data[2 * i],          // Points to the (x,y) measurement
-        NULL,                  // Loss function
-        NULL,                  // Loss function user data
+        nullptr,               // Loss function
+        nullptr,               // Loss function user data
         1,                     // Number of residuals
         2,                     // Number of parameter blocks
         parameter_sizes,
diff --git a/internal/ceres/callbacks.cc b/internal/ceres/callbacks.cc
index 70e2409..7a4381c 100644
--- a/internal/ceres/callbacks.cc
+++ b/internal/ceres/callbacks.cc
@@ -30,6 +30,7 @@
 
 #include "ceres/callbacks.h"
 
+#include <algorithm>
 #include <iostream>  // NO LINT
 
 #include "ceres/program.h"
@@ -69,9 +70,7 @@
 CallbackReturnType GradientProblemSolverStateUpdatingCallback::operator()(
     const IterationSummary& summary) {
   if (summary.step_is_successful) {
-    std::copy(internal_parameters_,
-              internal_parameters_ + num_parameters_,
-              user_parameters_);
+    std::copy_n(internal_parameters_, num_parameters_, user_parameters_);
   }
   return SOLVER_CONTINUE;
 }
diff --git a/internal/ceres/canonical_views_clustering.cc b/internal/ceres/canonical_views_clustering.cc
index 740ab8e..51cd4cb 100644
--- a/internal/ceres/canonical_views_clustering.cc
+++ b/internal/ceres/canonical_views_clustering.cc
@@ -85,11 +85,11 @@
     const WeightedGraph<int>& graph,
     vector<int>* centers,
     IntMap* membership) {
-  time_t start_time = time(NULL);
+  time_t start_time = time(nullptr);
   CanonicalViewsClustering cv;
   cv.ComputeClustering(options, graph, centers, membership);
   VLOG(2) << "Canonical views clustering time (secs): "
-          << time(NULL) - start_time;
+          << time(nullptr) - start_time;
 }
 
 // Implementation of CanonicalViewsClustering
@@ -107,7 +107,7 @@
 
   IntSet valid_views;
   FindValidViews(&valid_views);
-  while (valid_views.size() > 0) {
+  while (!valid_views.empty()) {
     // Find the next best canonical view.
     double best_difference = -std::numeric_limits<double>::max();
     int best_view = 0;
diff --git a/internal/ceres/casts.h b/internal/ceres/casts.h
index d137071..21445c8 100644
--- a/internal/ceres/casts.h
+++ b/internal/ceres/casts.h
@@ -32,7 +32,6 @@
 #define CERES_INTERNAL_CASTS_H_
 
 #include <cassert>
-#include <cstddef>  // For NULL.
 
 namespace ceres {
 
@@ -86,6 +85,7 @@
 //    if (dynamic_cast<Subclass2>(foo)) HandleASubclass2Object(foo);
 // You should design the code some other way not to need this.
 
+// TODO(sameeragarwal): Modernize this.
 template <typename To, typename From>  // use like this: down_cast<T*>(foo);
 inline To down_cast(From* f) {         // so we only accept pointers
   // Ensures that To is a sub-type of From *.  This test is here only
@@ -95,11 +95,11 @@
 
   // TODO(csilvers): This should use COMPILE_ASSERT.
   if (false) {
-    implicit_cast<From*, To>(NULL);
+    implicit_cast<From*, To>(nullptr);
   }
 
   // uses RTTI in dbg and fastbuild. asserts are disabled in opt builds.
-  assert(f == NULL || dynamic_cast<To>(f) != NULL);  // NOLINT
+  assert(f == nullptr || dynamic_cast<To>(f) != nullptr);  // NOLINT
   return static_cast<To>(f);
 }
 
diff --git a/internal/ceres/cgnr_linear_operator.h b/internal/ceres/cgnr_linear_operator.h
index a0d9e85..569ecdc 100644
--- a/internal/ceres/cgnr_linear_operator.h
+++ b/internal/ceres/cgnr_linear_operator.h
@@ -94,7 +94,7 @@
     A_.LeftMultiply(z_.get(), y);
 
     // y = y + DtDx
-    if (D_ != NULL) {
+    if (D_ != nullptr) {
       int n = A_.num_cols();
       VectorRef(y, n).array() +=
           ConstVectorRef(D_, n).array().square() * ConstVectorRef(x, n).array();
diff --git a/internal/ceres/cgnr_solver.cc b/internal/ceres/cgnr_solver.cc
index 2f38d30..e89305f 100644
--- a/internal/ceres/cgnr_solver.cc
+++ b/internal/ceres/cgnr_solver.cc
@@ -30,6 +30,8 @@
 
 #include "ceres/cgnr_solver.h"
 
+#include <memory>
+
 #include "ceres/block_jacobi_preconditioner.h"
 #include "ceres/cgnr_linear_operator.h"
 #include "ceres/conjugate_gradients_solver.h"
@@ -70,7 +72,7 @@
 
   if (!preconditioner_) {
     if (options_.preconditioner_type == JACOBI) {
-      preconditioner_.reset(new BlockJacobiPreconditioner(*A));
+      preconditioner_ = std::make_unique<BlockJacobiPreconditioner>(*A);
     } else if (options_.preconditioner_type == SUBSET) {
       Preconditioner::Options preconditioner_options;
       preconditioner_options.type = SUBSET;
@@ -81,8 +83,8 @@
       preconditioner_options.use_postordering = options_.use_postordering;
       preconditioner_options.num_threads = options_.num_threads;
       preconditioner_options.context = options_.context;
-      preconditioner_.reset(
-          new SubsetPreconditioner(preconditioner_options, *A));
+      preconditioner_ =
+          std::make_unique<SubsetPreconditioner>(preconditioner_options, *A);
     }
   }
 
diff --git a/internal/ceres/compressed_row_jacobian_writer.cc b/internal/ceres/compressed_row_jacobian_writer.cc
index c6a3001..2526940 100644
--- a/internal/ceres/compressed_row_jacobian_writer.cc
+++ b/internal/ceres/compressed_row_jacobian_writer.cc
@@ -30,7 +30,9 @@
 
 #include "ceres/compressed_row_jacobian_writer.h"
 
+#include <algorithm>
 #include <iterator>
+#include <string>
 #include <utility>
 #include <vector>
 
@@ -81,10 +83,12 @@
           make_pair(parameter_block->index(), j));
     }
   }
-  sort(evaluated_jacobian_blocks->begin(), evaluated_jacobian_blocks->end());
+  std::sort(evaluated_jacobian_blocks->begin(),
+            evaluated_jacobian_blocks->end());
 }
 
-SparseMatrix* CompressedRowJacobianWriter::CreateJacobian() const {
+std::unique_ptr<SparseMatrix> CompressedRowJacobianWriter::CreateJacobian()
+    const {
   const vector<ResidualBlock*>& residual_blocks = program_->residual_blocks();
 
   int total_num_residuals = program_->NumResiduals();
@@ -108,10 +112,11 @@
   // Allocate more space than needed to store the jacobian so that when the LM
   // algorithm adds the diagonal, no reallocation is necessary. This reduces
   // peak memory usage significantly.
-  CompressedRowSparseMatrix* jacobian = new CompressedRowSparseMatrix(
-      total_num_residuals,
-      total_num_effective_parameters,
-      num_jacobian_nonzeros + total_num_effective_parameters);
+  std::unique_ptr<CompressedRowSparseMatrix> jacobian =
+      std::make_unique<CompressedRowSparseMatrix>(
+          total_num_residuals,
+          total_num_effective_parameters,
+          num_jacobian_nonzeros + total_num_effective_parameters);
 
   // At this stage, the CompressedRowSparseMatrix is an invalid state. But this
   // seems to be the only way to construct it without doing a memory copy.
@@ -183,7 +188,7 @@
   }
   CHECK_EQ(num_jacobian_nonzeros, rows[total_num_residuals]);
 
-  PopulateJacobianRowAndColumnBlockVectors(program_, jacobian);
+  PopulateJacobianRowAndColumnBlockVectors(program_, jacobian.get());
 
   return jacobian;
 }
diff --git a/internal/ceres/compressed_row_jacobian_writer.h b/internal/ceres/compressed_row_jacobian_writer.h
index b1251ca..2765a58 100644
--- a/internal/ceres/compressed_row_jacobian_writer.h
+++ b/internal/ceres/compressed_row_jacobian_writer.h
@@ -93,7 +93,7 @@
     return ScratchEvaluatePreparer::Create(*program_, num_threads);
   }
 
-  SparseMatrix* CreateJacobian() const;
+  std::unique_ptr<SparseMatrix> CreateJacobian() const;
 
   void Write(int residual_id,
              int residual_offset,
diff --git a/internal/ceres/compressed_row_sparse_matrix.cc b/internal/ceres/compressed_row_sparse_matrix.cc
index b9e2715..766e413 100644
--- a/internal/ceres/compressed_row_sparse_matrix.cc
+++ b/internal/ceres/compressed_row_sparse_matrix.cc
@@ -104,7 +104,7 @@
       const int c = cols[idx];
       const int transpose_idx = transpose_rows[c]++;
       transpose_cols[transpose_idx] = r;
-      if (values != NULL && transpose_values != NULL) {
+      if (values != nullptr && transpose_values != nullptr) {
         transpose_values[transpose_idx] = values[idx];
       }
     }
@@ -174,18 +174,20 @@
                  cols_.size() * sizeof(double);  // NOLINT
 }
 
-CompressedRowSparseMatrix* CompressedRowSparseMatrix::FromTripletSparseMatrix(
+std::unique_ptr<CompressedRowSparseMatrix>
+CompressedRowSparseMatrix::FromTripletSparseMatrix(
     const TripletSparseMatrix& input) {
   return CompressedRowSparseMatrix::FromTripletSparseMatrix(input, false);
 }
 
-CompressedRowSparseMatrix*
+std::unique_ptr<CompressedRowSparseMatrix>
 CompressedRowSparseMatrix::FromTripletSparseMatrixTransposed(
     const TripletSparseMatrix& input) {
   return CompressedRowSparseMatrix::FromTripletSparseMatrix(input, true);
 }
 
-CompressedRowSparseMatrix* CompressedRowSparseMatrix::FromTripletSparseMatrix(
+std::unique_ptr<CompressedRowSparseMatrix>
+CompressedRowSparseMatrix::FromTripletSparseMatrix(
     const TripletSparseMatrix& input, bool transpose) {
   int num_rows = input.num_rows();
   int num_cols = input.num_cols();
@@ -214,8 +216,9 @@
               input.num_nonzeros() * sizeof(int) +     // NOLINT
               input.num_nonzeros() * sizeof(double));  // NOLINT
 
-  CompressedRowSparseMatrix* output =
-      new CompressedRowSparseMatrix(num_rows, num_cols, input.num_nonzeros());
+  std::unique_ptr<CompressedRowSparseMatrix> output =
+      std::make_unique<CompressedRowSparseMatrix>(
+          num_rows, num_cols, input.num_nonzeros());
 
   if (num_rows == 0) {
     // No data to copy.
@@ -533,7 +536,8 @@
   values_.resize(num_nonzeros);
 }
 
-CompressedRowSparseMatrix* CompressedRowSparseMatrix::CreateBlockDiagonalMatrix(
+std::unique_ptr<CompressedRowSparseMatrix>
+CompressedRowSparseMatrix::CreateBlockDiagonalMatrix(
     const double* diagonal, const vector<int>& blocks) {
   int num_rows = 0;
   int num_nonzeros = 0;
@@ -542,8 +546,9 @@
     num_nonzeros += blocks[i] * blocks[i];
   }
 
-  CompressedRowSparseMatrix* matrix =
-      new CompressedRowSparseMatrix(num_rows, num_rows, num_nonzeros);
+  std::unique_ptr<CompressedRowSparseMatrix> matrix =
+      std::make_unique<CompressedRowSparseMatrix>(
+          num_rows, num_rows, num_nonzeros);
 
   int* rows = matrix->mutable_rows();
   int* cols = matrix->mutable_cols();
@@ -573,9 +578,11 @@
   return matrix;
 }
 
-CompressedRowSparseMatrix* CompressedRowSparseMatrix::Transpose() const {
-  CompressedRowSparseMatrix* transpose =
-      new CompressedRowSparseMatrix(num_cols_, num_rows_, num_nonzeros());
+std::unique_ptr<CompressedRowSparseMatrix>
+CompressedRowSparseMatrix::Transpose() const {
+  std::unique_ptr<CompressedRowSparseMatrix> transpose =
+      std::make_unique<CompressedRowSparseMatrix>(
+          num_cols_, num_rows_, num_nonzeros());
 
   switch (storage_type_) {
     case UNSYMMETRIC:
@@ -612,7 +619,8 @@
   return transpose;
 }
 
-CompressedRowSparseMatrix* CompressedRowSparseMatrix::CreateRandomMatrix(
+std::unique_ptr<CompressedRowSparseMatrix>
+CompressedRowSparseMatrix::CreateRandomMatrix(
     CompressedRowSparseMatrix::RandomMatrixOptions options) {
   CHECK_GT(options.num_row_blocks, 0);
   CHECK_GT(options.min_row_block_size, 0);
@@ -714,7 +722,7 @@
   const int num_rows = std::accumulate(row_blocks.begin(), row_blocks.end(), 0);
   const int num_cols = std::accumulate(col_blocks.begin(), col_blocks.end(), 0);
   const bool kDoNotTranspose = false;
-  CompressedRowSparseMatrix* matrix =
+  std::unique_ptr<CompressedRowSparseMatrix> matrix =
       CompressedRowSparseMatrix::FromTripletSparseMatrix(
           TripletSparseMatrix(
               num_rows, num_cols, tsm_rows, tsm_cols, tsm_values),
diff --git a/internal/ceres/compressed_row_sparse_matrix.h b/internal/ceres/compressed_row_sparse_matrix.h
index a92fc91..0805ee7 100644
--- a/internal/ceres/compressed_row_sparse_matrix.h
+++ b/internal/ceres/compressed_row_sparse_matrix.h
@@ -63,9 +63,7 @@
   // entries.
   //
   // The storage type of the matrix is set to UNSYMMETRIC.
-  //
-  // Caller owns the result.
-  static CompressedRowSparseMatrix* FromTripletSparseMatrix(
+  static std::unique_ptr<CompressedRowSparseMatrix> FromTripletSparseMatrix(
       const TripletSparseMatrix& input);
 
   // Create a matrix with the same content as the TripletSparseMatrix
@@ -73,10 +71,8 @@
   // entries.
   //
   // The storage type of the matrix is set to UNSYMMETRIC.
-  //
-  // Caller owns the result.
-  static CompressedRowSparseMatrix* FromTripletSparseMatrixTransposed(
-      const TripletSparseMatrix& input);
+  static std::unique_ptr<CompressedRowSparseMatrix>
+  FromTripletSparseMatrixTransposed(const TripletSparseMatrix& input);
 
   // Use this constructor only if you know what you are doing. This
   // creates a "blank" matrix with the appropriate amount of memory
@@ -124,7 +120,7 @@
 
   void ToCRSMatrix(CRSMatrix* matrix) const;
 
-  CompressedRowSparseMatrix* Transpose() const;
+  std::unique_ptr<CompressedRowSparseMatrix> Transpose() const;
 
   // Destructive array resizing method.
   void SetMaxNumNonZeros(int num_nonzeros);
@@ -154,9 +150,7 @@
   // Create a block diagonal CompressedRowSparseMatrix with the given
   // block structure. The individual blocks are assumed to be laid out
   // contiguously in the diagonal array, one block at a time.
-  //
-  // Caller owns the result.
-  static CompressedRowSparseMatrix* CreateBlockDiagonalMatrix(
+  static std::unique_ptr<CompressedRowSparseMatrix> CreateBlockDiagonalMatrix(
       const double* diagonal, const std::vector<int>& blocks);
 
   // Options struct to control the generation of random block sparse
@@ -198,13 +192,11 @@
   // Create a random CompressedRowSparseMatrix whose entries are
   // normally distributed and whose structure is determined by
   // RandomMatrixOptions.
-  //
-  // Caller owns the result.
-  static CompressedRowSparseMatrix* CreateRandomMatrix(
+  static std::unique_ptr<CompressedRowSparseMatrix> CreateRandomMatrix(
       RandomMatrixOptions options);
 
  private:
-  static CompressedRowSparseMatrix* FromTripletSparseMatrix(
+  static std::unique_ptr<CompressedRowSparseMatrix> FromTripletSparseMatrix(
       const TripletSparseMatrix& input, bool transpose);
 
   int num_rows_;
diff --git a/internal/ceres/compressed_row_sparse_matrix_test.cc b/internal/ceres/compressed_row_sparse_matrix_test.cc
index 91f3ba4..3a2768c 100644
--- a/internal/ceres/compressed_row_sparse_matrix_test.cc
+++ b/internal/ceres/compressed_row_sparse_matrix_test.cc
@@ -30,8 +30,10 @@
 
 #include "ceres/compressed_row_sparse_matrix.h"
 
+#include <algorithm>
 #include <memory>
 #include <numeric>
+#include <string>
 
 #include "Eigen/SparseCore"
 #include "ceres/casts.h"
@@ -72,13 +74,13 @@
 class CompressedRowSparseMatrixTest : public ::testing::Test {
  protected:
   void SetUp() final {
-    std::unique_ptr<LinearLeastSquaresProblem> problem(
-        CreateLinearLeastSquaresProblemFromId(1));
+    std::unique_ptr<LinearLeastSquaresProblem> problem =
+        CreateLinearLeastSquaresProblemFromId(1);
 
     CHECK(problem != nullptr);
 
     tsm.reset(down_cast<TripletSparseMatrix*>(problem->A.release()));
-    crsm.reset(CompressedRowSparseMatrix::FromTripletSparseMatrix(*tsm));
+    crsm = CompressedRowSparseMatrix::FromTripletSparseMatrix(*tsm);
 
     num_rows = tsm->num_rows();
     num_cols = tsm->num_cols();
@@ -132,8 +134,8 @@
     tsm_appendage.Resize(i, num_cols);
 
     tsm->AppendRows(tsm_appendage);
-    std::unique_ptr<CompressedRowSparseMatrix> crsm_appendage(
-        CompressedRowSparseMatrix::FromTripletSparseMatrix(tsm_appendage));
+    std::unique_ptr<CompressedRowSparseMatrix> crsm_appendage =
+        CompressedRowSparseMatrix::FromTripletSparseMatrix(tsm_appendage);
 
     crsm->AppendRows(*crsm_appendage);
     CompareMatrices(tsm.get(), crsm.get());
@@ -143,7 +145,8 @@
 TEST_F(CompressedRowSparseMatrixTest, AppendAndDeleteBlockDiagonalMatrix) {
   int num_diagonal_rows = crsm->num_cols();
 
-  std::unique_ptr<double[]> diagonal(new double[num_diagonal_rows]);
+  std::unique_ptr<double[]> diagonal =
+      std::make_unique<double[]>(num_diagonal_rows);
   for (int i = 0; i < num_diagonal_rows; ++i) {
     diagonal[i] = i;
   }
@@ -156,9 +159,9 @@
   const vector<int> pre_row_blocks = crsm->row_blocks();
   const vector<int> pre_col_blocks = crsm->col_blocks();
 
-  std::unique_ptr<CompressedRowSparseMatrix> appendage(
+  std::unique_ptr<CompressedRowSparseMatrix> appendage =
       CompressedRowSparseMatrix::CreateBlockDiagonalMatrix(
-          diagonal.get(), row_and_column_blocks));
+          diagonal.get(), row_and_column_blocks);
 
   crsm->AppendRows(*appendage);
 
@@ -220,9 +223,9 @@
     diagonal(i) = i + 1;
   }
 
-  std::unique_ptr<CompressedRowSparseMatrix> matrix(
+  std::unique_ptr<CompressedRowSparseMatrix> matrix =
       CompressedRowSparseMatrix::CreateBlockDiagonalMatrix(diagonal.data(),
-                                                           blocks));
+                                                           blocks);
 
   EXPECT_EQ(matrix->num_rows(), 5);
   EXPECT_EQ(matrix->num_cols(), 5);
@@ -305,7 +308,7 @@
 
   std::copy(values, values + 17, cols);
 
-  std::unique_ptr<CompressedRowSparseMatrix> transpose(matrix.Transpose());
+  std::unique_ptr<CompressedRowSparseMatrix> transpose = matrix.Transpose();
 
   ASSERT_EQ(transpose->row_blocks().size(), matrix.col_blocks().size());
   for (int i = 0; i < transpose->row_blocks().size(); ++i) {
@@ -333,10 +336,10 @@
 
   const int kNumTrials = 10;
   for (int i = 0; i < kNumTrials; ++i) {
-    std::unique_ptr<TripletSparseMatrix> tsm(
-        TripletSparseMatrix::CreateRandomMatrix(options));
-    std::unique_ptr<CompressedRowSparseMatrix> crsm(
-        CompressedRowSparseMatrix::FromTripletSparseMatrix(*tsm));
+    std::unique_ptr<TripletSparseMatrix> tsm =
+        TripletSparseMatrix::CreateRandomMatrix(options);
+    std::unique_ptr<CompressedRowSparseMatrix> crsm =
+        CompressedRowSparseMatrix::FromTripletSparseMatrix(*tsm);
 
     Matrix expected;
     tsm->ToDenseMatrix(&expected);
@@ -359,10 +362,10 @@
 
   const int kNumTrials = 10;
   for (int i = 0; i < kNumTrials; ++i) {
-    std::unique_ptr<TripletSparseMatrix> tsm(
-        TripletSparseMatrix::CreateRandomMatrix(options));
-    std::unique_ptr<CompressedRowSparseMatrix> crsm(
-        CompressedRowSparseMatrix::FromTripletSparseMatrixTransposed(*tsm));
+    std::unique_ptr<TripletSparseMatrix> tsm =
+        TripletSparseMatrix::CreateRandomMatrix(options);
+    std::unique_ptr<CompressedRowSparseMatrix> crsm =
+        CompressedRowSparseMatrix::FromTripletSparseMatrixTransposed(*tsm);
 
     Matrix tmp;
     tsm->ToDenseMatrix(&tmp);
@@ -416,8 +419,8 @@
       options.max_row_block_size = kMaxBlockSize;
       options.block_density = std::max(0.5, RandDouble());
       options.storage_type = ::testing::get<0>(param);
-      std::unique_ptr<CompressedRowSparseMatrix> matrix(
-          CompressedRowSparseMatrix::CreateRandomMatrix(options));
+      std::unique_ptr<CompressedRowSparseMatrix> matrix =
+          CompressedRowSparseMatrix::CreateRandomMatrix(options);
       const int num_rows = matrix->num_rows();
       const int num_cols = matrix->num_cols();
 
@@ -485,8 +488,8 @@
       options.max_row_block_size = kMaxBlockSize;
       options.block_density = std::max(0.5, RandDouble());
       options.storage_type = ::testing::get<0>(param);
-      std::unique_ptr<CompressedRowSparseMatrix> matrix(
-          CompressedRowSparseMatrix::CreateRandomMatrix(options));
+      std::unique_ptr<CompressedRowSparseMatrix> matrix =
+          CompressedRowSparseMatrix::CreateRandomMatrix(options);
       const int num_rows = matrix->num_rows();
       const int num_cols = matrix->num_cols();
 
@@ -554,8 +557,8 @@
       options.max_row_block_size = kMaxBlockSize;
       options.block_density = std::max(0.5, RandDouble());
       options.storage_type = ::testing::get<0>(param);
-      std::unique_ptr<CompressedRowSparseMatrix> matrix(
-          CompressedRowSparseMatrix::CreateRandomMatrix(options));
+      std::unique_ptr<CompressedRowSparseMatrix> matrix =
+          CompressedRowSparseMatrix::CreateRandomMatrix(options);
       const int num_cols = matrix->num_cols();
 
       Vector actual(num_cols);
diff --git a/internal/ceres/conditioned_cost_function.cc b/internal/ceres/conditioned_cost_function.cc
index fb4c52a..a9013a2 100644
--- a/internal/ceres/conditioned_cost_function.cc
+++ b/internal/ceres/conditioned_cost_function.cc
@@ -98,7 +98,7 @@
       double** conditioner_derivative_pointer2 =
           &conditioner_derivative_pointer;
       if (!jacobians) {
-        conditioner_derivative_pointer2 = NULL;
+        conditioner_derivative_pointer2 = nullptr;
       }
 
       double unconditioned_residual = residuals[r];
diff --git a/internal/ceres/conjugate_gradients_solver.cc b/internal/ceres/conjugate_gradients_solver.cc
index 3019628..436a5b1 100644
--- a/internal/ceres/conjugate_gradients_solver.cc
+++ b/internal/ceres/conjugate_gradients_solver.cc
@@ -112,7 +112,7 @@
 
   for (summary.num_iterations = 1;; ++summary.num_iterations) {
     // Apply preconditioner
-    if (per_solve_options.preconditioner != NULL) {
+    if (per_solve_options.preconditioner != nullptr) {
       z.setZero();
       per_solve_options.preconditioner->RightMultiply(r.data(), z.data());
     } else {
diff --git a/internal/ceres/coordinate_descent_minimizer.cc b/internal/ceres/coordinate_descent_minimizer.cc
index 3c566c7..86ab838 100644
--- a/internal/ceres/coordinate_descent_minimizer.cc
+++ b/internal/ceres/coordinate_descent_minimizer.cc
@@ -135,8 +135,8 @@
     parameter_block->SetConstant();
   }
 
-  std::unique_ptr<LinearSolver*[]> linear_solvers(
-      new LinearSolver*[options.num_threads]);
+  std::vector<std::unique_ptr<LinearSolver>> linear_solvers(
+      options.num_threads);
 
   LinearSolver::Options linear_solver_options;
   linear_solver_options.type = DENSE_QR;
@@ -188,7 +188,7 @@
           // we are fine.
           Solver::Summary inner_summary;
           Solve(&inner_program,
-                linear_solvers[thread_id],
+                linear_solvers[thread_id].get(),
                 parameters + parameter_block->state_offset(),
                 &inner_summary);
 
@@ -204,9 +204,6 @@
     parameter_blocks_[i]->SetVarying();
   }
 
-  for (int i = 0; i < options.num_threads; ++i) {
-    delete linear_solvers[i];
-  }
 }
 
 // Solve the optimization problem for one parameter block.
@@ -221,17 +218,16 @@
   string error;
 
   Minimizer::Options minimizer_options;
-  minimizer_options.evaluator.reset(
-      Evaluator::Create(evaluator_options_, program, &error));
+  minimizer_options.evaluator =
+      Evaluator::Create(evaluator_options_, program, &error);
   CHECK(minimizer_options.evaluator != nullptr);
-  minimizer_options.jacobian.reset(
-      minimizer_options.evaluator->CreateJacobian());
+  minimizer_options.jacobian = minimizer_options.evaluator->CreateJacobian();
   CHECK(minimizer_options.jacobian != nullptr);
 
   TrustRegionStrategy::Options trs_options;
   trs_options.linear_solver = linear_solver;
-  minimizer_options.trust_region_strategy.reset(
-      TrustRegionStrategy::Create(trs_options));
+  minimizer_options.trust_region_strategy =
+      TrustRegionStrategy::Create(trs_options);
   CHECK(minimizer_options.trust_region_strategy != nullptr);
   minimizer_options.is_silent = true;
 
@@ -263,12 +259,12 @@
 // of independent sets of decreasing size and invert it. This
 // seems to work better in practice, i.e., Cameras before
 // points.
-ParameterBlockOrdering* CoordinateDescentMinimizer::CreateOrdering(
-    const Program& program) {
-  std::unique_ptr<ParameterBlockOrdering> ordering(new ParameterBlockOrdering);
+std::shared_ptr<ParameterBlockOrdering>
+CoordinateDescentMinimizer::CreateOrdering(const Program& program) {
+  auto ordering = std::make_shared<ParameterBlockOrdering>();
   ComputeRecursiveIndependentSetOrdering(program, ordering.get());
   ordering->Reverse();
-  return ordering.release();
+  return ordering;
 }
 
 }  // namespace internal
diff --git a/internal/ceres/coordinate_descent_minimizer.h b/internal/ceres/coordinate_descent_minimizer.h
index 90ba2cf..d46e6a8 100644
--- a/internal/ceres/coordinate_descent_minimizer.h
+++ b/internal/ceres/coordinate_descent_minimizer.h
@@ -81,7 +81,8 @@
   // of independent sets of decreasing size and invert it. This
   // seems to work better in practice, i.e., Cameras before
   // points.
-  static ParameterBlockOrdering* CreateOrdering(const Program& program);
+  static std::shared_ptr<ParameterBlockOrdering> CreateOrdering(
+      const Program& program);
 
  private:
   void Solve(Program* program,
diff --git a/internal/ceres/corrector.cc b/internal/ceres/corrector.cc
index 6a79a06..bf3ba9c 100644
--- a/internal/ceres/corrector.cc
+++ b/internal/ceres/corrector.cc
@@ -111,7 +111,7 @@
 }
 
 void Corrector::CorrectResiduals(const int num_rows, double* residuals) {
-  DCHECK(residuals != NULL);
+  DCHECK(residuals != nullptr);
   // Equation 11 in BANS.
   VectorRef(residuals, num_rows) *= residual_scaling_;
 }
@@ -120,8 +120,8 @@
                                 const int num_cols,
                                 double* residuals,
                                 double* jacobian) {
-  DCHECK(residuals != NULL);
-  DCHECK(jacobian != NULL);
+  DCHECK(residuals != nullptr);
+  DCHECK(jacobian != nullptr);
 
   // The common case (rho[2] <= 0).
   if (alpha_sq_norm_ == 0.0) {
diff --git a/internal/ceres/cost_function_to_functor_test.cc b/internal/ceres/cost_function_to_functor_test.cc
index 11f47e3..80efd66 100644
--- a/internal/ceres/cost_function_to_functor_test.cc
+++ b/internal/ceres/cost_function_to_functor_test.cc
@@ -92,9 +92,9 @@
   }
 
   EXPECT_TRUE(
-      cost_function.Evaluate(parameter_blocks.get(), residuals.get(), NULL));
+      cost_function.Evaluate(parameter_blocks.get(), residuals.get(), nullptr));
   EXPECT_TRUE(actual_cost_function.Evaluate(
-      parameter_blocks.get(), actual_residuals.get(), NULL));
+      parameter_blocks.get(), actual_residuals.get(), nullptr));
   for (int i = 0; i < num_residuals; ++i) {
     EXPECT_NEAR(residuals[i], actual_residuals[i], kTolerance)
         << "residual id: " << i;
diff --git a/internal/ceres/covariance.cc b/internal/ceres/covariance.cc
index ead3287..d63dd37 100644
--- a/internal/ceres/covariance.cc
+++ b/internal/ceres/covariance.cc
@@ -39,12 +39,11 @@
 
 namespace ceres {
 
-using std::make_pair;
 using std::pair;
 using std::vector;
 
 Covariance::Covariance(const Covariance::Options& options) {
-  impl_.reset(new internal::CovarianceImpl(options));
+  impl_ = std::make_unique<internal::CovarianceImpl>(options);
 }
 
 Covariance::~Covariance() = default;
diff --git a/internal/ceres/covariance_impl.cc b/internal/ceres/covariance_impl.cc
index 8c00384..5075609 100644
--- a/internal/ceres/covariance_impl.cc
+++ b/internal/ceres/covariance_impl.cc
@@ -83,7 +83,7 @@
 
 template <typename T>
 void CheckForDuplicates(std::vector<T> blocks) {
-  sort(blocks.begin(), blocks.end());
+  std::sort(blocks.begin(), blocks.end());
   typename std::vector<T>::iterator it =
       std::adjacent_find(blocks.begin(), blocks.end());
   if (it != blocks.end()) {
@@ -117,7 +117,7 @@
       covariance_blocks);
   problem_ = problem;
   parameter_block_to_row_index_.clear();
-  covariance_matrix_.reset(nullptr);
+  covariance_matrix_ = nullptr;
   is_valid_ = (ComputeCovarianceSparsity(covariance_blocks, problem) &&
                ComputeCovarianceValues());
   is_computed_ = true;
@@ -448,9 +448,9 @@
     }
   }
 
-  if (covariance_blocks.size() == 0) {
+  if (covariance_blocks.empty()) {
     VLOG(2) << "No non-zero covariance blocks found";
-    covariance_matrix_.reset(nullptr);
+    covariance_matrix_ = nullptr;
     return true;
   }
 
@@ -460,8 +460,8 @@
   std::sort(covariance_blocks.begin(), covariance_blocks.end());
 
   // Fill the sparsity pattern of the covariance matrix.
-  covariance_matrix_.reset(
-      new CompressedRowSparseMatrix(num_rows, num_rows, num_nonzeros));
+  covariance_matrix_ = std::make_unique<CompressedRowSparseMatrix>(
+      num_rows, num_rows, num_nonzeros);
 
   int* rows = covariance_matrix_->mutable_rows();
   int* cols = covariance_matrix_->mutable_cols();
diff --git a/internal/ceres/covariance_test.cc b/internal/ceres/covariance_test.cc
index e39b2ae..b117a64 100644
--- a/internal/ceres/covariance_test.cc
+++ b/internal/ceres/covariance_test.cc
@@ -72,12 +72,12 @@
       residuals[i] = 1;
     }
 
-    if (jacobians == NULL) {
+    if (jacobians == nullptr) {
       return true;
     }
 
-    if (jacobians[0] != NULL) {
-      copy(jacobian_.begin(), jacobian_.end(), jacobians[0]);
+    if (jacobians[0] != nullptr) {
+      std::copy(jacobian_.begin(), jacobian_.end(), jacobians[0]);
     }
 
     return true;
@@ -110,16 +110,16 @@
       residuals[i] = 2;
     }
 
-    if (jacobians == NULL) {
+    if (jacobians == nullptr) {
       return true;
     }
 
-    if (jacobians[0] != NULL) {
-      copy(jacobian1_.begin(), jacobian1_.end(), jacobians[0]);
+    if (jacobians[0] != nullptr) {
+      std::copy(jacobian1_.begin(), jacobian1_.end(), jacobians[0]);
     }
 
-    if (jacobians[1] != NULL) {
-      copy(jacobian2_.begin(), jacobian2_.end(), jacobians[1]);
+    if (jacobians[1] != nullptr) {
+      std::copy(jacobian2_.begin(), jacobian2_.end(), jacobians[1]);
     }
 
     return true;
@@ -143,13 +143,13 @@
   // Add in random order
   Vector junk_jacobian = Vector::Zero(10);
   problem.AddResidualBlock(
-      new UnaryCostFunction(1, 1, junk_jacobian.data()), NULL, block1);
+      new UnaryCostFunction(1, 1, junk_jacobian.data()), nullptr, block1);
   problem.AddResidualBlock(
-      new UnaryCostFunction(1, 4, junk_jacobian.data()), NULL, block4);
+      new UnaryCostFunction(1, 4, junk_jacobian.data()), nullptr, block4);
   problem.AddResidualBlock(
-      new UnaryCostFunction(1, 3, junk_jacobian.data()), NULL, block3);
+      new UnaryCostFunction(1, 3, junk_jacobian.data()), nullptr, block3);
   problem.AddResidualBlock(
-      new UnaryCostFunction(1, 2, junk_jacobian.data()), NULL, block2);
+      new UnaryCostFunction(1, 2, junk_jacobian.data()), nullptr, block2);
 
   // Sparsity pattern
   //
@@ -229,13 +229,13 @@
   // Add in random order
   Vector junk_jacobian = Vector::Zero(10);
   problem.AddResidualBlock(
-      new UnaryCostFunction(1, 1, junk_jacobian.data()), NULL, block1);
+      new UnaryCostFunction(1, 1, junk_jacobian.data()), nullptr, block1);
   problem.AddResidualBlock(
-      new UnaryCostFunction(1, 4, junk_jacobian.data()), NULL, block4);
+      new UnaryCostFunction(1, 4, junk_jacobian.data()), nullptr, block4);
   problem.AddResidualBlock(
-      new UnaryCostFunction(1, 3, junk_jacobian.data()), NULL, block3);
+      new UnaryCostFunction(1, 3, junk_jacobian.data()), nullptr, block3);
   problem.AddResidualBlock(
-      new UnaryCostFunction(1, 2, junk_jacobian.data()), NULL, block2);
+      new UnaryCostFunction(1, 2, junk_jacobian.data()), nullptr, block2);
   problem.SetParameterBlockConstant(block3);
 
   // Sparsity pattern
@@ -310,12 +310,12 @@
   // Add in random order
   Vector junk_jacobian = Vector::Zero(10);
   problem.AddResidualBlock(
-      new UnaryCostFunction(1, 1, junk_jacobian.data()), NULL, block1);
+      new UnaryCostFunction(1, 1, junk_jacobian.data()), nullptr, block1);
   problem.AddResidualBlock(
-      new UnaryCostFunction(1, 4, junk_jacobian.data()), NULL, block4);
+      new UnaryCostFunction(1, 4, junk_jacobian.data()), nullptr, block4);
   problem.AddParameterBlock(block3, 3);
   problem.AddResidualBlock(
-      new UnaryCostFunction(1, 2, junk_jacobian.data()), NULL, block2);
+      new UnaryCostFunction(1, 2, junk_jacobian.data()), nullptr, block2);
 
   // Sparsity pattern
   //
@@ -450,32 +450,34 @@
 
     {
       double jacobian[] = {1.0, 0.0, 0.0, 1.0};
-      problem_.AddResidualBlock(new UnaryCostFunction(2, 2, jacobian), NULL, x);
+      problem_.AddResidualBlock(
+          new UnaryCostFunction(2, 2, jacobian), nullptr, x);
     }
 
     {
       double jacobian[] = {2.0, 0.0, 0.0, 0.0, 2.0, 0.0, 0.0, 0.0, 2.0};
-      problem_.AddResidualBlock(new UnaryCostFunction(3, 3, jacobian), NULL, y);
+      problem_.AddResidualBlock(
+          new UnaryCostFunction(3, 3, jacobian), nullptr, y);
     }
 
     {
       double jacobian = 5.0;
       problem_.AddResidualBlock(
-          new UnaryCostFunction(1, 1, &jacobian), NULL, z);
+          new UnaryCostFunction(1, 1, &jacobian), nullptr, z);
     }
 
     {
       double jacobian1[] = {1.0, 2.0, 3.0};
       double jacobian2[] = {-5.0, -6.0};
       problem_.AddResidualBlock(
-          new BinaryCostFunction(1, 3, 2, jacobian1, jacobian2), NULL, y, x);
+          new BinaryCostFunction(1, 3, 2, jacobian1, jacobian2), nullptr, y, x);
     }
 
     {
       double jacobian1[] = {2.0};
       double jacobian2[] = {3.0, -2.0};
       problem_.AddResidualBlock(
-          new BinaryCostFunction(1, 1, 2, jacobian1, jacobian2), NULL, z, x);
+          new BinaryCostFunction(1, 1, 2, jacobian1, jacobian2), nullptr, z, x);
     }
 
     all_covariance_blocks_.push_back(make_pair(x, x));
@@ -1368,32 +1370,34 @@
 
     {
       double jacobian[] = {1.0, 0.0, 0.0, 1.0};
-      problem_.AddResidualBlock(new UnaryCostFunction(2, 2, jacobian), NULL, x);
+      problem_.AddResidualBlock(
+          new UnaryCostFunction(2, 2, jacobian), nullptr, x);
     }
 
     {
       double jacobian[] = {0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0};
-      problem_.AddResidualBlock(new UnaryCostFunction(3, 3, jacobian), NULL, y);
+      problem_.AddResidualBlock(
+          new UnaryCostFunction(3, 3, jacobian), nullptr, y);
     }
 
     {
       double jacobian = 5.0;
       problem_.AddResidualBlock(
-          new UnaryCostFunction(1, 1, &jacobian), NULL, z);
+          new UnaryCostFunction(1, 1, &jacobian), nullptr, z);
     }
 
     {
       double jacobian1[] = {0.0, 0.0, 0.0};
       double jacobian2[] = {-5.0, -6.0};
       problem_.AddResidualBlock(
-          new BinaryCostFunction(1, 3, 2, jacobian1, jacobian2), NULL, y, x);
+          new BinaryCostFunction(1, 3, 2, jacobian1, jacobian2), nullptr, y, x);
     }
 
     {
       double jacobian1[] = {2.0};
       double jacobian2[] = {3.0, -2.0};
       problem_.AddResidualBlock(
-          new BinaryCostFunction(1, 1, 2, jacobian1, jacobian2), NULL, z, x);
+          new BinaryCostFunction(1, 1, 2, jacobian1, jacobian2), nullptr, z, x);
     }
 
     all_covariance_blocks_.push_back(make_pair(x, x));
@@ -1602,8 +1606,8 @@
   void SetUp() final {
     num_parameter_blocks_ = 2000;
     parameter_block_size_ = 5;
-    parameters_.reset(
-        new double[parameter_block_size_ * num_parameter_blocks_]);
+    parameters_ = std::make_unique<double[]>(parameter_block_size_ *
+                                             num_parameter_blocks_);
 
     Matrix jacobian(parameter_block_size_, parameter_block_size_);
     for (int i = 0; i < num_parameter_blocks_; ++i) {
@@ -1614,7 +1618,7 @@
       problem_.AddResidualBlock(
           new UnaryCostFunction(
               parameter_block_size_, parameter_block_size_, jacobian.data()),
-          NULL,
+          nullptr,
           block_i);
       for (int j = i; j < num_parameter_blocks_; ++j) {
         double* block_j = parameters_.get() + j * parameter_block_size_;
diff --git a/internal/ceres/cubic_interpolation_test.cc b/internal/ceres/cubic_interpolation_test.cc
index 3907d22..03b1aae 100644
--- a/internal/ceres/cubic_interpolation_test.cc
+++ b/internal/ceres/cubic_interpolation_test.cc
@@ -226,7 +226,7 @@
                                       const double b,
                                       const double c,
                                       const double d) {
-    values_.reset(new double[kDataDimension * kNumSamples]);
+    values_ = std::make_unique<double[]>(kDataDimension * kNumSamples);
 
     for (int x = 0; x < kNumSamples; ++x) {
       for (int dim = 0; dim < kDataDimension; ++dim) {
@@ -335,7 +335,7 @@
 
   template <int kDataDimension>
   void RunPolynomialInterpolationTest(const Eigen::Matrix3d& coeff) {
-    values_.reset(new double[kNumRows * kNumCols * kDataDimension]);
+    values_ = std::make_unique<double[]>(kNumRows * kNumCols * kDataDimension);
     coeff_ = coeff;
     double* v = values_.get();
     for (int r = 0; r < kNumRows; ++r) {
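The array form used in the two test fixtures above has one extra property worth noting: `std::make_unique<double[]>(n)` value-initializes the buffer (it performs `new double[n]()`), whereas the old `new double[n]` left the elements indeterminate. That only matters if an element is read before it is written, but it is a real difference of the rewrite. A small standalone check:

```c++
#include <cassert>
#include <memory>

int main() {
  const int n = 16;

  // Old style: the buffer is allocated, but elements are indeterminate until
  // they are explicitly written.
  std::unique_ptr<double[]> a(new double[n]);
  a[0] = 1.0;

  // make_unique<double[]>(n) performs `new double[n]()`, so every element
  // starts out value-initialized to 0.0.
  auto b = std::make_unique<double[]>(n);
  for (int i = 0; i < n; ++i) {
    assert(b[i] == 0.0);
  }
  return 0;
}
```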
diff --git a/internal/ceres/cxsparse.cc b/internal/ceres/cxsparse.cc
index 0167f98..7800e8b 100644
--- a/internal/ceres/cxsparse.cc
+++ b/internal/ceres/cxsparse.cc
@@ -47,7 +47,7 @@
 
 using std::vector;
 
-CXSparse::CXSparse() : scratch_(NULL), scratch_size_(0) {}
+CXSparse::CXSparse() : scratch_(nullptr), scratch_size_(0) {}
 
 CXSparse::~CXSparse() {
   if (scratch_size_ > 0) {
@@ -116,7 +116,7 @@
   block_matrix.nzmax = block_rows.size();
   block_matrix.p = &block_cols[0];
   block_matrix.i = &block_rows[0];
-  block_matrix.x = NULL;
+  block_matrix.x = nullptr;
 
   int* ordering = cs_amd(1, &block_matrix);
   vector<int> block_ordering(num_row_blocks, -1);
@@ -146,7 +146,7 @@
 
   if (symbolic_factor->lnz < 0) {
     cs_sfree(symbolic_factor);
-    symbolic_factor = NULL;
+    symbolic_factor = nullptr;
   }
 
   return symbolic_factor;
@@ -206,8 +206,8 @@
 
 CXSparseCholesky::CXSparseCholesky(const OrderingType ordering_type)
     : ordering_type_(ordering_type),
-      symbolic_factor_(NULL),
-      numeric_factor_(NULL) {}
+      symbolic_factor_(nullptr),
+      numeric_factor_(nullptr) {}
 
 CXSparseCholesky::~CXSparseCholesky() {
   FreeSymbolicFactorization();
@@ -217,14 +217,14 @@
 LinearSolverTerminationType CXSparseCholesky::Factorize(
     CompressedRowSparseMatrix* lhs, std::string* message) {
   CHECK_EQ(lhs->storage_type(), StorageType());
-  if (lhs == NULL) {
-    *message = "Failure: Input lhs is NULL.";
+  if (lhs == nullptr) {
+    *message = "Failure: Input lhs is nullptr.";
     return LINEAR_SOLVER_FATAL_ERROR;
   }
 
   cs_di cs_lhs = cs_.CreateSparseMatrixTransposeView(lhs);
 
-  if (symbolic_factor_ == NULL) {
+  if (symbolic_factor_ == nullptr) {
     if (ordering_type_ == NATURAL) {
       symbolic_factor_ = cs_.AnalyzeCholeskyWithNaturalOrdering(&cs_lhs);
     } else {
@@ -236,7 +236,7 @@
       }
     }
 
-    if (symbolic_factor_ == NULL) {
+    if (symbolic_factor_ == nullptr) {
       *message = "CXSparse Failure : Symbolic factorization failed.";
       return LINEAR_SOLVER_FATAL_ERROR;
     }
@@ -244,7 +244,7 @@
 
   FreeNumericFactorization();
   numeric_factor_ = cs_.Cholesky(&cs_lhs, symbolic_factor_);
-  if (numeric_factor_ == NULL) {
+  if (numeric_factor_ == nullptr) {
     *message = "CXSparse Failure : Numeric factorization failed.";
     return LINEAR_SOLVER_FAILURE;
   }
@@ -255,7 +255,7 @@
 LinearSolverTerminationType CXSparseCholesky::Solve(const double* rhs,
                                                     double* solution,
                                                     std::string* message) {
-  CHECK(numeric_factor_ != NULL)
+  CHECK(numeric_factor_ != nullptr)
       << "Solve called without a call to Factorize first.";
   const int num_cols = numeric_factor_->L->n;
   memcpy(solution, rhs, num_cols * sizeof(*solution));
@@ -264,16 +264,16 @@
 }
 
 void CXSparseCholesky::FreeSymbolicFactorization() {
-  if (symbolic_factor_ != NULL) {
+  if (symbolic_factor_ != nullptr) {
     cs_.Free(symbolic_factor_);
-    symbolic_factor_ = NULL;
+    symbolic_factor_ = nullptr;
   }
 }
 
 void CXSparseCholesky::FreeNumericFactorization() {
-  if (numeric_factor_ != NULL) {
+  if (numeric_factor_ != nullptr) {
     cs_.Free(numeric_factor_);
-    numeric_factor_ = NULL;
+    numeric_factor_ = nullptr;
   }
 }
 
diff --git a/internal/ceres/cxsparse.h b/internal/ceres/cxsparse.h
index d1d14ec..63dddb5 100644
--- a/internal/ceres/cxsparse.h
+++ b/internal/ceres/cxsparse.h
@@ -80,7 +80,7 @@
   cs_di CreateSparseMatrixTransposeView(CompressedRowSparseMatrix* A);
 
   // Creates a new matrix from a triplet form. Deallocate the returned matrix
-  // with Free. May return NULL if the compression or allocation fails.
+  // with Free. May return nullptr if the compression or allocation fails.
   cs_di* CreateSparseMatrix(TripletSparseMatrix* A);
 
   // B = A'
@@ -122,7 +122,7 @@
                                const std::vector<int>& col_blocks);
 
   // Compute a fill-reducing approximate minimum degree ordering of
-  // the matrix A. ordering should be non-NULL and should point to
+  // the matrix A. ordering must not be nullptr and must point to
   // enough memory to hold the ordering for the rows of A.
   void ApproximateMinimumDegreeOrdering(cs_di* A, int* ordering);
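The two comments above describe contracts rather than code: `CreateSparseMatrix` may return `nullptr` and must be released with `Free`, while `ApproximateMinimumDegreeOrdering` writes into storage the caller supplies. The sketch below illustrates those contracts with placeholder names; it is not the CXSparse wrapper itself:

```c++
#include <iostream>
#include <vector>

struct SparseHandle {
  int n;
};

// May return nullptr on failure; the caller releases the result with Free().
SparseHandle* CreateSparseMatrix(int n) {
  if (n <= 0) {
    return nullptr;  // Stands in for a failed compression or allocation.
  }
  return new SparseHandle{n};
}

void Free(SparseHandle* handle) { delete handle; }

// `ordering` must not be nullptr and must hold A.n entries.
void ApproximateMinimumDegreeOrdering(const SparseHandle& A, int* ordering) {
  for (int i = 0; i < A.n; ++i) {
    ordering[i] = i;  // Identity ordering, purely for illustration.
  }
}

int main() {
  SparseHandle* A = CreateSparseMatrix(4);
  if (A == nullptr) {
    std::cerr << "allocation failed\n";
    return 1;
  }
  std::vector<int> ordering(A->n);  // Caller provides the output storage.
  ApproximateMinimumDegreeOrdering(*A, ordering.data());
  Free(A);  // Ownership here is manual, unlike the unique_ptr cases above.
  return 0;
}
```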
 
diff --git a/internal/ceres/dense_jacobian_writer.h b/internal/ceres/dense_jacobian_writer.h
index efecf35..0d86819 100644
--- a/internal/ceres/dense_jacobian_writer.h
+++ b/internal/ceres/dense_jacobian_writer.h
@@ -58,9 +58,9 @@
     return ScratchEvaluatePreparer::Create(*program_, num_threads);
   }
 
-  SparseMatrix* CreateJacobian() const {
-    return new DenseSparseMatrix(program_->NumResiduals(),
-                                 program_->NumEffectiveParameters());
+  std::unique_ptr<SparseMatrix> CreateJacobian() const {
+    return std::make_unique<DenseSparseMatrix>(
+        program_->NumResiduals(), program_->NumEffectiveParameters());
   }
 
   void Write(int residual_id,
diff --git a/internal/ceres/dense_linear_solver_test.cc b/internal/ceres/dense_linear_solver_test.cc
index 4dff5af..8110d8d 100644
--- a/internal/ceres/dense_linear_solver_test.cc
+++ b/internal/ceres/dense_linear_solver_test.cc
@@ -62,8 +62,8 @@
   Param param = GetParam();
   const bool regularized = testing::get<2>(param);
 
-  std::unique_ptr<LinearLeastSquaresProblem> problem(
-      CreateLinearLeastSquaresProblemFromId(testing::get<3>(param)));
+  std::unique_ptr<LinearLeastSquaresProblem> problem =
+      CreateLinearLeastSquaresProblemFromId(testing::get<3>(param));
   DenseSparseMatrix lhs(*down_cast<TripletSparseMatrix*>(problem->A.get()));
 
   const int num_cols = lhs.num_cols();
diff --git a/internal/ceres/dense_normal_cholesky_solver.cc b/internal/ceres/dense_normal_cholesky_solver.cc
index b208d58..2d3521c 100644
--- a/internal/ceres/dense_normal_cholesky_solver.cc
+++ b/internal/ceres/dense_normal_cholesky_solver.cc
@@ -72,7 +72,7 @@
   //   rhs = A'b
   Vector rhs = A->matrix().transpose() * ConstVectorRef(b, num_rows);
 
-  if (per_solve_options.D != NULL) {
+  if (per_solve_options.D != nullptr) {
     ConstVectorRef D(per_solve_options.D, num_cols);
     lhs += D.array().square().matrix().asDiagonal();
   }
diff --git a/internal/ceres/dense_sparse_matrix_test.cc b/internal/ceres/dense_sparse_matrix_test.cc
index 2fa7216..1bfd6c6 100644
--- a/internal/ceres/dense_sparse_matrix_test.cc
+++ b/internal/ceres/dense_sparse_matrix_test.cc
@@ -70,13 +70,13 @@
 class DenseSparseMatrixTest : public ::testing::Test {
  protected:
   void SetUp() final {
-    std::unique_ptr<LinearLeastSquaresProblem> problem(
-        CreateLinearLeastSquaresProblemFromId(1));
+    std::unique_ptr<LinearLeastSquaresProblem> problem =
+        CreateLinearLeastSquaresProblemFromId(1);
 
     CHECK(problem != nullptr);
 
     tsm.reset(down_cast<TripletSparseMatrix*>(problem->A.release()));
-    dsm.reset(new DenseSparseMatrix(*tsm));
+    dsm = std::make_unique<DenseSparseMatrix>(*tsm);
 
     num_rows = tsm->num_rows();
     num_cols = tsm->num_cols();
diff --git a/internal/ceres/dogleg_strategy.cc b/internal/ceres/dogleg_strategy.cc
index 03ae22f..65f7ccd 100644
--- a/internal/ceres/dogleg_strategy.cc
+++ b/internal/ceres/dogleg_strategy.cc
@@ -480,7 +480,7 @@
 
   // Find the real parts y_i of its roots (not only the real roots).
   Vector roots_real;
-  if (!FindPolynomialRoots(polynomial, &roots_real, NULL)) {
+  if (!FindPolynomialRoots(polynomial, &roots_real, nullptr)) {
     // Failed to find the roots of the polynomial, i.e. the candidate
     // solutions of the constrained problem. Report this back to the caller.
     return false;
diff --git a/internal/ceres/dogleg_strategy_test.cc b/internal/ceres/dogleg_strategy_test.cc
index 0c20f25..cd5fe02 100644
--- a/internal/ceres/dogleg_strategy_test.cc
+++ b/internal/ceres/dogleg_strategy_test.cc
@@ -79,7 +79,7 @@
 
     Matrix sqrtD = Ddiag.array().sqrt().matrix().asDiagonal();
     Matrix jacobian = sqrtD * basis;
-    jacobian_.reset(new DenseSparseMatrix(jacobian));
+    jacobian_ = std::make_unique<DenseSparseMatrix>(jacobian);
 
     Vector minimum(6);
     minimum << 1.0, 1.0, 1.0, 1.0, 1.0, 1.0;
@@ -107,7 +107,7 @@
     Ddiag << 1.0, 2.0, 4.0, 8.0, 16.0, 32.0;
 
     Matrix jacobian = Ddiag.asDiagonal();
-    jacobian_.reset(new DenseSparseMatrix(jacobian));
+    jacobian_ = std::make_unique<DenseSparseMatrix>(jacobian);
 
     Vector minimum(6);
     minimum << 0.0, 0.0, 1.0, 0.0, 0.0, 0.0;
diff --git a/internal/ceres/dynamic_autodiff_cost_function_test.cc b/internal/ceres/dynamic_autodiff_cost_function_test.cc
index 55d3fe1..7da81a9 100644
--- a/internal/ceres/dynamic_autodiff_cost_function_test.cc
+++ b/internal/ceres/dynamic_autodiff_cost_function_test.cc
@@ -89,7 +89,7 @@
   parameter_blocks[0] = &param_block_0[0];
   parameter_blocks[1] = &param_block_1[0];
   EXPECT_TRUE(
-      cost_function.Evaluate(&parameter_blocks[0], residuals.data(), NULL));
+      cost_function.Evaluate(&parameter_blocks[0], residuals.data(), nullptr));
   for (int r = 0; r < 10; ++r) {
     EXPECT_EQ(1.0 * r, residuals.at(r * 2));
     EXPECT_EQ(-1.0 * r, residuals.at(r * 2 + 1));
@@ -189,7 +189,7 @@
   jacobian_vect[0].resize(21 * 10, -100000);
   jacobian_vect[1].resize(21 * 5, -100000);
   vector<double*> jacobian;
-  jacobian.push_back(NULL);
+  jacobian.push_back(nullptr);
   jacobian.push_back(jacobian_vect[1].data());
 
   // Test jacobian computation.
@@ -240,7 +240,7 @@
   jacobian_vect[1].resize(21 * 5, -100000);
   vector<double*> jacobian;
   jacobian.push_back(jacobian_vect[0].data());
-  jacobian.push_back(NULL);
+  jacobian.push_back(nullptr);
 
   // Test jacobian computation.
   EXPECT_TRUE(cost_function.Evaluate(
@@ -329,15 +329,14 @@
     // Prepare the cost function.
     typedef DynamicAutoDiffCostFunction<MyThreeParameterCostFunctor, 3>
         DynamicMyThreeParameterCostFunction;
-    DynamicMyThreeParameterCostFunction* cost_function =
-        new DynamicMyThreeParameterCostFunction(
-            new MyThreeParameterCostFunctor());
+    auto cost_function = std::make_unique<DynamicMyThreeParameterCostFunction>(
+        new MyThreeParameterCostFunctor());
     cost_function->AddParameterBlock(1);
     cost_function->AddParameterBlock(2);
     cost_function->AddParameterBlock(3);
     cost_function->SetNumResiduals(7);
 
-    cost_function_.reset(cost_function);
+    cost_function_ = std::move(cost_function);
 
     // Setup jacobian data.
     jacobian_vect_.resize(3);
@@ -430,7 +429,7 @@
 TEST_F(ThreeParameterCostFunctorTest, TestThreeParameterResiduals) {
   vector<double> residuals(7, -100000);
   EXPECT_TRUE(cost_function_->Evaluate(
-      parameter_blocks_.data(), residuals.data(), NULL));
+      parameter_blocks_.data(), residuals.data(), nullptr));
   for (int i = 0; i < 7; ++i) {
     EXPECT_EQ(expected_residuals_[i], residuals[i]);
   }
@@ -469,9 +468,9 @@
   vector<double> residuals(7, -100000);
 
   vector<double*> jacobian;
-  jacobian.push_back(NULL);
+  jacobian.push_back(nullptr);
   jacobian.push_back(jacobian_vect_[1].data());
-  jacobian.push_back(NULL);
+  jacobian.push_back(nullptr);
 
   EXPECT_TRUE(cost_function_->Evaluate(
       parameter_blocks_.data(), residuals.data(), jacobian.data()));
@@ -491,7 +490,7 @@
 
   vector<double*> jacobian;
   jacobian.push_back(jacobian_vect_[0].data());
-  jacobian.push_back(NULL);
+  jacobian.push_back(nullptr);
   jacobian.push_back(jacobian_vect_[2].data());
 
   EXPECT_TRUE(cost_function_->Evaluate(
@@ -562,14 +561,14 @@
     // Prepare the cost function.
     typedef DynamicAutoDiffCostFunction<MySixParameterCostFunctor, 3>
         DynamicMySixParameterCostFunction;
-    DynamicMySixParameterCostFunction* cost_function =
-        new DynamicMySixParameterCostFunction(new MySixParameterCostFunctor());
+    auto cost_function = std::make_unique<DynamicMySixParameterCostFunction>(
+        new MySixParameterCostFunctor());
     for (int i = 0; i < 6; ++i) {
       cost_function->AddParameterBlock(1);
     }
     cost_function->SetNumResiduals(7);
 
-    cost_function_.reset(cost_function);
+    cost_function_ = std::move(cost_function);
 
     // Setup jacobian data.
     jacobian_vect_.resize(6);
@@ -669,7 +668,7 @@
 TEST_F(SixParameterCostFunctorTest, TestSixParameterResiduals) {
   vector<double> residuals(7, -100000);
   EXPECT_TRUE(cost_function_->Evaluate(
-      parameter_blocks_.data(), residuals.data(), NULL));
+      parameter_blocks_.data(), residuals.data(), nullptr));
   for (int i = 0; i < 7; ++i) {
     EXPECT_EQ(expected_residuals_[i], residuals[i]);
   }
@@ -706,10 +705,10 @@
   vector<double*> jacobian;
   jacobian.push_back(jacobian_vect_[0].data());
   jacobian.push_back(jacobian_vect_[1].data());
-  jacobian.push_back(NULL);
+  jacobian.push_back(nullptr);
   jacobian.push_back(jacobian_vect_[3].data());
   jacobian.push_back(jacobian_vect_[4].data());
-  jacobian.push_back(NULL);
+  jacobian.push_back(nullptr);
 
   EXPECT_TRUE(cost_function_->Evaluate(
       parameter_blocks_.data(), residuals.data(), jacobian.data()));
@@ -735,10 +734,10 @@
 
   vector<double*> jacobian;
   jacobian.push_back(jacobian_vect_[0].data());
-  jacobian.push_back(NULL);
-  jacobian.push_back(NULL);
+  jacobian.push_back(nullptr);
+  jacobian.push_back(nullptr);
   jacobian.push_back(jacobian_vect_[3].data());
-  jacobian.push_back(NULL);
+  jacobian.push_back(nullptr);
   jacobian.push_back(jacobian_vect_[5].data());
 
   EXPECT_TRUE(cost_function_->Evaluate(
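The rewritten `SetUp` above (and its twin in the numeric-diff test further down) follows a configure-then-move shape: build the concrete cost function in a local `unique_ptr` via `auto` and `make_unique`, call its configuration methods, then hand ownership to the base-typed member with `std::move`. A reduced sketch with placeholder classes:

```c++
#include <memory>
#include <utility>

struct CostFunction {
  virtual ~CostFunction() = default;
};

struct DynamicCostFunction : CostFunction {
  void AddParameterBlock(int size) { total_parameter_size += size; }
  void SetNumResiduals(int n) { num_residuals = n; }
  int total_parameter_size = 0;
  int num_residuals = 0;
};

struct Fixture {
  void SetUp() {
    // Keep the derived type while configuring...
    auto cost_function = std::make_unique<DynamicCostFunction>();
    cost_function->AddParameterBlock(1);
    cost_function->AddParameterBlock(2);
    cost_function->SetNumResiduals(7);
    // ...then transfer ownership into the base-typed member.
    cost_function_ = std::move(cost_function);
  }

  std::unique_ptr<CostFunction> cost_function_;
};

int main() {
  Fixture fixture;
  fixture.SetUp();
  return 0;
}
```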
diff --git a/internal/ceres/dynamic_compressed_row_jacobian_writer.cc b/internal/ceres/dynamic_compressed_row_jacobian_writer.cc
index 9f709a3..f6bb3b3 100644
--- a/internal/ceres/dynamic_compressed_row_jacobian_writer.cc
+++ b/internal/ceres/dynamic_compressed_row_jacobian_writer.cc
@@ -48,12 +48,12 @@
   return ScratchEvaluatePreparer::Create(*program_, num_threads);
 }
 
-SparseMatrix* DynamicCompressedRowJacobianWriter::CreateJacobian() const {
-  DynamicCompressedRowSparseMatrix* jacobian =
-      new DynamicCompressedRowSparseMatrix(program_->NumResiduals(),
-                                           program_->NumEffectiveParameters(),
-                                           0 /* max_num_nonzeros */);
-  return jacobian;
+std::unique_ptr<SparseMatrix>
+DynamicCompressedRowJacobianWriter::CreateJacobian() const {
+  return std::make_unique<DynamicCompressedRowSparseMatrix>(
+      program_->NumResiduals(),
+      program_->NumEffectiveParameters(),
+      0 /* max_num_nonzeros */);
 }
 
 void DynamicCompressedRowJacobianWriter::Write(int residual_id,
diff --git a/internal/ceres/dynamic_compressed_row_jacobian_writer.h b/internal/ceres/dynamic_compressed_row_jacobian_writer.h
index ef8fa25..b9858ba 100644
--- a/internal/ceres/dynamic_compressed_row_jacobian_writer.h
+++ b/internal/ceres/dynamic_compressed_row_jacobian_writer.h
@@ -60,7 +60,7 @@
   // Return a `DynamicCompressedRowSparseMatrix` which is filled by
   // `Write`. Note that `Finalize` must be called to make the
   // `CompressedRowSparseMatrix` interface valid.
-  SparseMatrix* CreateJacobian() const;
+  std::unique_ptr<SparseMatrix> CreateJacobian() const;
 
   // Write only the non-zero jacobian entries for a residual block
   // (specified by `residual_id`) into `base_jacobian`, starting at the row
diff --git a/internal/ceres/dynamic_compressed_row_sparse_matrix_test.cc b/internal/ceres/dynamic_compressed_row_sparse_matrix_test.cc
index 95dc807..3b67389 100644
--- a/internal/ceres/dynamic_compressed_row_sparse_matrix_test.cc
+++ b/internal/ceres/dynamic_compressed_row_sparse_matrix_test.cc
@@ -61,7 +61,8 @@
     InitialiseDenseReference();
     InitialiseSparseMatrixReferences();
 
-    dcrsm.reset(new DynamicCompressedRowSparseMatrix(num_rows, num_cols, 0));
+    dcrsm = std::make_unique<DynamicCompressedRowSparseMatrix>(
+        num_rows, num_cols, 0);
   }
 
   void Finalize() { dcrsm->Finalize(num_additional_elements); }
@@ -93,8 +94,8 @@
     }
     ASSERT_EQ(values.size(), expected_num_nonzeros);
 
-    tsm.reset(
-        new TripletSparseMatrix(num_rows, num_cols, expected_num_nonzeros));
+    tsm = std::make_unique<TripletSparseMatrix>(
+        num_rows, num_cols, expected_num_nonzeros);
     copy(rows.begin(), rows.end(), tsm->mutable_rows());
     copy(cols.begin(), cols.end(), tsm->mutable_cols());
     copy(values.begin(), values.end(), tsm->mutable_values());
@@ -104,7 +105,7 @@
     tsm->ToDenseMatrix(&dense_from_tsm);
     ASSERT_TRUE((dense.array() == dense_from_tsm.array()).all());
 
-    crsm.reset(CompressedRowSparseMatrix::FromTripletSparseMatrix(*tsm));
+    crsm = CompressedRowSparseMatrix::FromTripletSparseMatrix(*tsm);
     Matrix dense_from_crsm;
     crsm->ToDenseMatrix(&dense_from_crsm);
     ASSERT_TRUE((dense.array() == dense_from_crsm.array()).all());
diff --git a/internal/ceres/dynamic_numeric_diff_cost_function_test.cc b/internal/ceres/dynamic_numeric_diff_cost_function_test.cc
index 0150f5e..ad3f479 100644
--- a/internal/ceres/dynamic_numeric_diff_cost_function_test.cc
+++ b/internal/ceres/dynamic_numeric_diff_cost_function_test.cc
@@ -89,7 +89,7 @@
   parameter_blocks[0] = &param_block_0[0];
   parameter_blocks[1] = &param_block_1[0];
   EXPECT_TRUE(
-      cost_function.Evaluate(&parameter_blocks[0], residuals.data(), NULL));
+      cost_function.Evaluate(&parameter_blocks[0], residuals.data(), nullptr));
   for (int r = 0; r < 10; ++r) {
     EXPECT_EQ(1.0 * r, residuals.at(r * 2));
     EXPECT_EQ(-1.0 * r, residuals.at(r * 2 + 1));
@@ -190,7 +190,7 @@
   jacobian_vect[0].resize(21 * 10, -100000);
   jacobian_vect[1].resize(21 * 5, -100000);
   vector<double*> jacobian;
-  jacobian.push_back(NULL);
+  jacobian.push_back(nullptr);
   jacobian.push_back(jacobian_vect[1].data());
 
   // Test jacobian computation.
@@ -241,7 +241,7 @@
   jacobian_vect[1].resize(21 * 5, -100000);
   vector<double*> jacobian;
   jacobian.push_back(jacobian_vect[0].data());
-  jacobian.push_back(NULL);
+  jacobian.push_back(nullptr);
 
   // Test jacobian computation.
   EXPECT_TRUE(cost_function.Evaluate(
@@ -330,15 +330,14 @@
     // Prepare the cost function.
     typedef DynamicNumericDiffCostFunction<MyThreeParameterCostFunctor>
         DynamicMyThreeParameterCostFunction;
-    DynamicMyThreeParameterCostFunction* cost_function =
-        new DynamicMyThreeParameterCostFunction(
-            new MyThreeParameterCostFunctor());
+    auto cost_function = std::make_unique<DynamicMyThreeParameterCostFunction>(
+        new MyThreeParameterCostFunctor());
     cost_function->AddParameterBlock(1);
     cost_function->AddParameterBlock(2);
     cost_function->AddParameterBlock(3);
     cost_function->SetNumResiduals(7);
 
-    cost_function_.reset(cost_function);
+    cost_function_ = std::move(cost_function);
 
     // Setup jacobian data.
     jacobian_vect_.resize(3);
@@ -431,7 +430,7 @@
 TEST_F(ThreeParameterCostFunctorTest, TestThreeParameterResiduals) {
   vector<double> residuals(7, -100000);
   EXPECT_TRUE(cost_function_->Evaluate(
-      parameter_blocks_.data(), residuals.data(), NULL));
+      parameter_blocks_.data(), residuals.data(), nullptr));
   for (int i = 0; i < 7; ++i) {
     EXPECT_EQ(expected_residuals_[i], residuals[i]);
   }
@@ -470,9 +469,9 @@
   vector<double> residuals(7, -100000);
 
   vector<double*> jacobian;
-  jacobian.push_back(NULL);
+  jacobian.push_back(nullptr);
   jacobian.push_back(jacobian_vect_[1].data());
-  jacobian.push_back(NULL);
+  jacobian.push_back(nullptr);
 
   EXPECT_TRUE(cost_function_->Evaluate(
       parameter_blocks_.data(), residuals.data(), jacobian.data()));
@@ -492,7 +491,7 @@
 
   vector<double*> jacobian;
   jacobian.push_back(jacobian_vect_[0].data());
-  jacobian.push_back(NULL);
+  jacobian.push_back(nullptr);
   jacobian.push_back(jacobian_vect_[2].data());
 
   EXPECT_TRUE(cost_function_->Evaluate(
diff --git a/internal/ceres/dynamic_sparse_normal_cholesky_solver.cc b/internal/ceres/dynamic_sparse_normal_cholesky_solver.cc
index 87cc904..d101aef 100644
--- a/internal/ceres/dynamic_sparse_normal_cholesky_solver.cc
+++ b/internal/ceres/dynamic_sparse_normal_cholesky_solver.cc
@@ -71,11 +71,11 @@
     // it before returning the matrix to the user.
     std::unique_ptr<CompressedRowSparseMatrix> regularizer;
     if (!A->col_blocks().empty()) {
-      regularizer.reset(CompressedRowSparseMatrix::CreateBlockDiagonalMatrix(
-          per_solve_options.D, A->col_blocks()));
+      regularizer = CompressedRowSparseMatrix::CreateBlockDiagonalMatrix(
+          per_solve_options.D, A->col_blocks());
     } else {
-      regularizer.reset(
-          new CompressedRowSparseMatrix(per_solve_options.D, num_cols));
+      regularizer = std::make_unique<CompressedRowSparseMatrix>(
+          per_solve_options.D, num_cols);
     }
     A->AppendRows(*regularizer);
   }
diff --git a/internal/ceres/dynamic_sparse_normal_cholesky_solver_test.cc b/internal/ceres/dynamic_sparse_normal_cholesky_solver_test.cc
index 8bf609e..f915d32 100644
--- a/internal/ceres/dynamic_sparse_normal_cholesky_solver_test.cc
+++ b/internal/ceres/dynamic_sparse_normal_cholesky_solver_test.cc
@@ -50,19 +50,19 @@
 class DynamicSparseNormalCholeskySolverTest : public ::testing::Test {
  protected:
   void SetUp() final {
-    std::unique_ptr<LinearLeastSquaresProblem> problem(
-        CreateLinearLeastSquaresProblemFromId(1));
-    A_.reset(CompressedRowSparseMatrix::FromTripletSparseMatrix(
-        *down_cast<TripletSparseMatrix*>(problem->A.get())));
-    b_.reset(problem->b.release());
-    D_.reset(problem->D.release());
+    std::unique_ptr<LinearLeastSquaresProblem> problem =
+        CreateLinearLeastSquaresProblemFromId(1);
+    A_ = CompressedRowSparseMatrix::FromTripletSparseMatrix(
+        *down_cast<TripletSparseMatrix*>(problem->A.get()));
+    b_ = std::move(problem->b);
+    D_ = std::move(problem->D);
   }
 
   void TestSolver(const LinearSolver::Options& options, double* D) {
     Matrix dense_A;
     A_->ToDenseMatrix(&dense_A);
     Matrix lhs = dense_A.transpose() * dense_A;
-    if (D != NULL) {
+    if (D != nullptr) {
       lhs += (ConstVectorRef(D, A_->num_cols()).array() *
               ConstVectorRef(D, A_->num_cols()).array())
                  .matrix()
@@ -100,7 +100,7 @@
         sparse_linear_algebra_library_type;
     ContextImpl context;
     options.context = &context;
-    TestSolver(options, NULL);
+    TestSolver(options, nullptr);
     TestSolver(options, D_.get());
   }
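In the `SetUp` above, `b_ = std::move(problem->b)` replaces the old `b_.reset(problem->b.release())` round trip: moving a `unique_ptr` transfers ownership in one step and leaves the source empty. A minimal standalone version with placeholder types:

```c++
#include <cassert>
#include <memory>
#include <utility>

struct Vector {
  explicit Vector(int n) : n(n) {}
  int n;
};

struct Problem {
  std::unique_ptr<Vector> b = std::make_unique<Vector>(10);
};

int main() {
  Problem problem;
  std::unique_ptr<Vector> b_;

  // Old style: b_.reset(problem.b.release());
  // The move expresses the same ownership transfer directly:
  b_ = std::move(problem.b);

  assert(b_ != nullptr);
  assert(problem.b == nullptr);  // The moved-from pointer is left empty.
  return 0;
}
```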
 
diff --git a/internal/ceres/dynamic_sparsity_test.cc b/internal/ceres/dynamic_sparsity_test.cc
index 12e62ef..6c7b24c 100644
--- a/internal/ceres/dynamic_sparsity_test.cc
+++ b/internal/ceres/dynamic_sparsity_test.cc
@@ -307,16 +307,16 @@
     residuals[0] = y_[0] - ((1.0 - u) * x[1 + i0][0] + u * x[1 + i1][0]);
     residuals[1] = y_[1] - ((1.0 - u) * x[1 + i0][1] + u * x[1 + i1][1]);
 
-    if (jacobians == NULL) {
+    if (jacobians == nullptr) {
       return true;
     }
 
-    if (jacobians[0] != NULL) {
+    if (jacobians[0] != nullptr) {
       jacobians[0][0] = x[1 + i0][0] - x[1 + i1][0];
       jacobians[0][1] = x[1 + i0][1] - x[1 + i1][1];
     }
     for (int i = 0; i < num_segments_; ++i) {
-      if (jacobians[i + 1] != NULL) {
+      if (jacobians[i + 1] != nullptr) {
         MatrixRef(jacobians[i + 1], 2, 2).setZero();
         if (i == i0) {
           jacobians[i + 1][0] = -(1.0 - u);
@@ -403,7 +403,7 @@
   // For each data point add a residual which measures its distance to its
   // corresponding position on the line segment contour.
   std::vector<double*> parameter_blocks(1 + num_segments);
-  parameter_blocks[0] = NULL;
+  parameter_blocks[0] = nullptr;
   for (int i = 0; i < num_segments; ++i) {
     parameter_blocks[i + 1] = X.data() + 2 * i;
   }
@@ -411,7 +411,7 @@
     parameter_blocks[0] = &t[i];
     problem.AddResidualBlock(
         PointToLineSegmentContourCostFunction::Create(num_segments, kY.row(i)),
-        NULL,
+        nullptr,
         parameter_blocks);
   }
 
@@ -419,7 +419,7 @@
   for (int i = 0; i < num_segments; ++i) {
     problem.AddResidualBlock(
         EuclideanDistanceFunctor::Create(sqrt(regularization_weight)),
-        NULL,
+        nullptr,
         X.data() + 2 * i,
         X.data() + 2 * ((i + 1) % num_segments));
   }
diff --git a/internal/ceres/eigensparse.cc b/internal/ceres/eigensparse.cc
index 1c47a02..ba8e05a 100644
--- a/internal/ceres/eigensparse.cc
+++ b/internal/ceres/eigensparse.cc
@@ -109,7 +109,7 @@
                                         std::string* message) final {
     CHECK_EQ(lhs->storage_type(), StorageType());
 
-    typename Solver::Scalar* values_ptr = NULL;
+    typename Solver::Scalar* values_ptr = nullptr;
     if (std::is_same<typename Solver::Scalar, double>::value) {
       values_ptr =
           reinterpret_cast<typename Solver::Scalar*>(lhs->mutable_values());
@@ -141,8 +141,6 @@
 
 std::unique_ptr<SparseCholesky> EigenSparseCholesky::Create(
     const OrderingType ordering_type) {
-  std::unique_ptr<SparseCholesky> sparse_cholesky;
-
   typedef Eigen::SimplicialLDLT<Eigen::SparseMatrix<double>,
                                 Eigen::Upper,
                                 Eigen::AMDOrdering<int>>
@@ -151,20 +149,18 @@
                                 Eigen::Upper,
                                 Eigen::NaturalOrdering<int>>
       WithNaturalOrdering;
+
   if (ordering_type == AMD) {
-    sparse_cholesky.reset(new EigenSparseCholeskyTemplate<WithAMDOrdering>());
+    return std::make_unique<EigenSparseCholeskyTemplate<WithAMDOrdering>>();
   } else {
-    sparse_cholesky.reset(
-        new EigenSparseCholeskyTemplate<WithNaturalOrdering>());
+    return std::make_unique<EigenSparseCholeskyTemplate<WithNaturalOrdering>>();
   }
-  return sparse_cholesky;
 }
 
 EigenSparseCholesky::~EigenSparseCholesky() = default;
 
 std::unique_ptr<SparseCholesky> FloatEigenSparseCholesky::Create(
     const OrderingType ordering_type) {
-  std::unique_ptr<SparseCholesky> sparse_cholesky;
   typedef Eigen::SimplicialLDLT<Eigen::SparseMatrix<float>,
                                 Eigen::Upper,
                                 Eigen::AMDOrdering<int>>
@@ -174,12 +170,10 @@
                                 Eigen::NaturalOrdering<int>>
       WithNaturalOrdering;
   if (ordering_type == AMD) {
-    sparse_cholesky.reset(new EigenSparseCholeskyTemplate<WithAMDOrdering>());
+    return std::make_unique<EigenSparseCholeskyTemplate<WithAMDOrdering>>();
   } else {
-    sparse_cholesky.reset(
-        new EigenSparseCholeskyTemplate<WithNaturalOrdering>());
+    return std::make_unique<EigenSparseCholeskyTemplate<WithNaturalOrdering>>();
   }
-  return sparse_cholesky;
 }
 
 FloatEigenSparseCholesky::~FloatEigenSparseCholesky() = default;
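The `Create` factories above no longer stage the result in a local `unique_ptr`: each branch returns its `std::make_unique<...>()` expression directly, relying on the implicit conversion from `unique_ptr<Derived>` to `unique_ptr<Base>`. A standalone sketch of the same shape; the class names are placeholders:

```c++
#include <memory>

struct SparseCholesky {
  virtual ~SparseCholesky() = default;
};

struct AMDOrderedCholesky : SparseCholesky {};
struct NaturalOrderedCholesky : SparseCholesky {};

enum OrderingType { AMD, NATURAL };

// Each branch returns its make_unique expression directly; unique_ptr<Derived>
// converts implicitly to unique_ptr<Base>, so no temporary local is needed.
std::unique_ptr<SparseCholesky> Create(OrderingType ordering_type) {
  if (ordering_type == AMD) {
    return std::make_unique<AMDOrderedCholesky>();
  }
  return std::make_unique<NaturalOrderedCholesky>();
}

int main() {
  std::unique_ptr<SparseCholesky> cholesky = Create(AMD);
  return cholesky != nullptr ? 0 : 1;
}
```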
diff --git a/internal/ceres/evaluation_callback_test.cc b/internal/ceres/evaluation_callback_test.cc
index f81f3bd..84fff8a 100644
--- a/internal/ceres/evaluation_callback_test.cc
+++ b/internal/ceres/evaluation_callback_test.cc
@@ -132,7 +132,7 @@
     double y = (*parameters)[1];
     residuals[0] = y - a * sin(x);
     residuals[1] = x;
-    if (jacobians != NULL) {
+    if (jacobians != nullptr) {
       (*jacobians)[2 * 0 + 0] = -a * cos(x);  // df1/dx
       (*jacobians)[2 * 0 + 1] = 1.0;          // df1/dy
       (*jacobians)[2 * 1 + 0] = 1.0;          // df2/dx
@@ -157,7 +157,7 @@
     EXPECT_EQ(prepare_parameter_hash, incoming_parameter_hash);
 
     // Check: jacobians are requested if they were in PrepareForEvaluation().
-    EXPECT_EQ(prepare_requested_jacobians, jacobians != NULL);
+    EXPECT_EQ(prepare_requested_jacobians, jacobians != nullptr);
 
     evaluate_num_calls++;
     evaluate_last_parameter_hash = incoming_parameter_hash;
@@ -196,7 +196,7 @@
   problem_options.evaluation_callback = &cost_function;
   problem_options.cost_function_ownership = DO_NOT_TAKE_OWNERSHIP;
   Problem problem(problem_options);
-  problem.AddResidualBlock(&cost_function, NULL, parameters);
+  problem.AddResidualBlock(&cost_function, nullptr, parameters);
 
   Solver::Options options;
   options.linear_solver_type = DENSE_QR;
@@ -322,7 +322,7 @@
   problem_options.evaluation_callback = &cost_function;
   problem_options.cost_function_ownership = DO_NOT_TAKE_OWNERSHIP;
   Problem problem(problem_options);
-  problem.AddResidualBlock(&cost_function, NULL, parameters);
+  problem.AddResidualBlock(&cost_function, nullptr, parameters);
 
   Solver::Options options;
   options.linear_solver_type = DENSE_QR;
diff --git a/internal/ceres/evaluator.cc b/internal/ceres/evaluator.cc
index 4d69f96..f8d1121 100644
--- a/internal/ceres/evaluator.cc
+++ b/internal/ceres/evaluator.cc
@@ -50,36 +50,40 @@
 
 Evaluator::~Evaluator() = default;
 
-Evaluator* Evaluator::Create(const Evaluator::Options& options,
-                             Program* program,
-                             std::string* error) {
-  CHECK(options.context != NULL);
+std::unique_ptr<Evaluator> Evaluator::Create(const Evaluator::Options& options,
+                                             Program* program,
+                                             std::string* error) {
+  CHECK(options.context != nullptr);
 
   switch (options.linear_solver_type) {
     case DENSE_QR:
     case DENSE_NORMAL_CHOLESKY:
-      return new ProgramEvaluator<ScratchEvaluatePreparer, DenseJacobianWriter>(
+      return std::make_unique<
+          ProgramEvaluator<ScratchEvaluatePreparer, DenseJacobianWriter>>(
           options, program);
     case DENSE_SCHUR:
     case SPARSE_SCHUR:
     case ITERATIVE_SCHUR:
     case CGNR:
-      return new ProgramEvaluator<BlockEvaluatePreparer, BlockJacobianWriter>(
+      return std::make_unique<
+          ProgramEvaluator<BlockEvaluatePreparer, BlockJacobianWriter>>(
           options, program);
     case SPARSE_NORMAL_CHOLESKY:
       if (options.dynamic_sparsity) {
-        return new ProgramEvaluator<ScratchEvaluatePreparer,
-                                    DynamicCompressedRowJacobianWriter,
-                                    DynamicCompressedRowJacobianFinalizer>(
-            options, program);
+        return std::make_unique<
+            ProgramEvaluator<ScratchEvaluatePreparer,
+                             DynamicCompressedRowJacobianWriter,
+                             DynamicCompressedRowJacobianFinalizer>>(options,
+                                                                     program);
       } else {
-        return new ProgramEvaluator<BlockEvaluatePreparer, BlockJacobianWriter>(
+        return std::make_unique<
+            ProgramEvaluator<BlockEvaluatePreparer, BlockJacobianWriter>>(
             options, program);
       }
 
     default:
       *error = "Invalid Linear Solver Type. Unable to create evaluator.";
-      return NULL;
+      return nullptr;
   }
 }
 
diff --git a/internal/ceres/evaluator.h b/internal/ceres/evaluator.h
index 5c9cd56..e642551 100644
--- a/internal/ceres/evaluator.h
+++ b/internal/ceres/evaluator.h
@@ -68,9 +68,9 @@
     EvaluationCallback* evaluation_callback = nullptr;
   };
 
-  static Evaluator* Create(const Options& options,
-                           Program* program,
-                           std::string* error);
+  static std::unique_ptr<Evaluator> Create(const Options& options,
+                                           Program* program,
+                                           std::string* error);
 
   // Build and return a sparse matrix for storing and working with the Jacobian
   // of the objective function. The jacobian has dimensions
@@ -88,7 +88,7 @@
   // the jacobian for use with CHOLMOD, whereas BlockOptimizationProblem
   // creates a BlockSparseMatrix representation of the jacobian for use in the
   // Schur complement based methods.
-  virtual SparseMatrix* CreateJacobian() const = 0;
+  virtual std::unique_ptr<SparseMatrix> CreateJacobian() const = 0;
 
   // Options struct to control Evaluator::Evaluate;
   struct EvaluateOptions {
@@ -102,10 +102,10 @@
 
   // Evaluate the cost function for the given state. Returns the cost,
   // residuals, and jacobian in the corresponding arguments. Both residuals and
-  // jacobian are optional; to avoid computing them, pass NULL.
+  // jacobian are optional; to avoid computing them, pass nullptr.
   //
-  // If non-NULL, the Jacobian must have a suitable sparsity pattern; only the
-  // values array of the jacobian is modified.
+  // If not nullptr, the Jacobian must have a suitable sparsity pattern; only
+  // the values array of the jacobian is modified.
   //
   // state is an array of size NumParameters(), cost is a pointer to a single
   // double, and residuals is an array of doubles of size NumResiduals().
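With `Create` and `CreateJacobian` now returning `std::unique_ptr`, ownership is part of the signature: callers initialize a smart pointer directly instead of wrapping a raw return value, and the `nullptr`-on-error path still works unchanged. A reduced sketch of that shape; the classes here are stand-ins, not the real `Evaluator` interface:

```c++
#include <iostream>
#include <memory>
#include <string>

struct SparseMatrix {
  virtual ~SparseMatrix() = default;
};

struct DenseJacobian : SparseMatrix {};

struct Evaluator {
  virtual ~Evaluator() = default;
  // Ownership of the new jacobian goes straight to the caller through the
  // return type; no comment about who deletes it is needed.
  virtual std::unique_ptr<SparseMatrix> CreateJacobian() const = 0;
};

struct DenseEvaluator : Evaluator {
  std::unique_ptr<SparseMatrix> CreateJacobian() const override {
    return std::make_unique<DenseJacobian>();
  }
};

// A factory returning unique_ptr can still signal failure with nullptr.
std::unique_ptr<Evaluator> CreateEvaluator(bool valid, std::string* error) {
  if (!valid) {
    *error = "Invalid configuration. Unable to create evaluator.";
    return nullptr;
  }
  return std::make_unique<DenseEvaluator>();
}

int main() {
  std::string error;
  std::unique_ptr<Evaluator> evaluator = CreateEvaluator(true, &error);
  if (evaluator == nullptr) {
    std::cerr << error << "\n";
    return 1;
  }
  std::unique_ptr<SparseMatrix> jacobian = evaluator->CreateJacobian();
  return jacobian != nullptr ? 0 : 1;
}
```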
diff --git a/internal/ceres/evaluator_test.cc b/internal/ceres/evaluator_test.cc
index 7aaf1b0..f5703f7 100644
--- a/internal/ceres/evaluator_test.cc
+++ b/internal/ceres/evaluator_test.cc
@@ -117,7 +117,7 @@
 };
 
 struct EvaluatorTest : public ::testing::TestWithParam<EvaluatorTestOptions> {
-  Evaluator* CreateEvaluator(Program* program) {
+  std::unique_ptr<Evaluator> CreateEvaluator(Program* program) {
     // This program is straight from the ProblemImpl, and so has no index/offset
     // yet; compute it here as required by the evaluator implementations.
     program->SetParameterOffsetsAndIndex();
@@ -152,8 +152,8 @@
                           const double* expected_residuals,
                           const double* expected_gradient,
                           const double* expected_jacobian) {
-    std::unique_ptr<Evaluator> evaluator(
-        CreateEvaluator(problem->mutable_program()));
+    std::unique_ptr<Evaluator> evaluator =
+        CreateEvaluator(problem->mutable_program());
     int num_residuals = expected_num_rows;
     int num_parameters = expected_num_cols;
 
@@ -608,8 +608,8 @@
   // The values are ignored.
   double state[9];
 
-  std::unique_ptr<Evaluator> evaluator(
-      CreateEvaluator(problem.mutable_program()));
+  std::unique_ptr<Evaluator> evaluator =
+      CreateEvaluator(problem.mutable_program());
   std::unique_ptr<SparseMatrix> jacobian(evaluator->CreateJacobian());
   double cost;
   EXPECT_FALSE(evaluator->Evaluate(state, &cost, nullptr, nullptr, nullptr));
diff --git a/internal/ceres/evaluator_test_utils.cc b/internal/ceres/evaluator_test_utils.cc
index 25801db..f3384bc 100644
--- a/internal/ceres/evaluator_test_utils.cc
+++ b/internal/ceres/evaluator_test_utils.cc
@@ -49,7 +49,7 @@
                         const double* actual_jacobian) {
   EXPECT_EQ(expected_cost, actual_cost);
 
-  if (expected_residuals != NULL) {
+  if (expected_residuals != nullptr) {
     ConstVectorRef expected_residuals_vector(expected_residuals,
                                              expected_num_rows);
     ConstVectorRef actual_residuals_vector(actual_residuals, expected_num_rows);
@@ -61,7 +61,7 @@
         << expected_residuals_vector;
   }
 
-  if (expected_gradient != NULL) {
+  if (expected_gradient != nullptr) {
     ConstVectorRef expected_gradient_vector(expected_gradient,
                                             expected_num_cols);
     ConstVectorRef actual_gradient_vector(actual_gradient, expected_num_cols);
@@ -74,7 +74,7 @@
         << expected_gradient_vector.transpose();
   }
 
-  if (expected_jacobian != NULL) {
+  if (expected_jacobian != nullptr) {
     ConstMatrixRef expected_jacobian_matrix(
         expected_jacobian, expected_num_rows, expected_num_cols);
     ConstMatrixRef actual_jacobian_matrix(
diff --git a/internal/ceres/generate_bundle_adjustment_tests.py b/internal/ceres/generate_bundle_adjustment_tests.py
index 7b0caa3..86b4fcb 100644
--- a/internal/ceres/generate_bundle_adjustment_tests.py
+++ b/internal/ceres/generate_bundle_adjustment_tests.py
@@ -137,7 +137,7 @@
   options->sparse_linear_algebra_library_type = %(sparse_backend)s;
   options->preconditioner_type = %(preconditioner)s;
   if (%(ordering)s) {
-    options->linear_solver_ordering.reset();
+    options->linear_solver_ordering = nullptr;
   }
   Problem* problem = bundle_adjustment_problem.mutable_problem();
   RunSolverForConfigAndExpectResidualsMatch(*options, problem);
diff --git a/internal/ceres/gradient_checker.cc b/internal/ceres/gradient_checker.cc
index c0dcd47..fd302f0 100644
--- a/internal/ceres/gradient_checker.cc
+++ b/internal/ceres/gradient_checker.cc
@@ -134,18 +134,17 @@
     manifolds_[i] = new internal::ManifoldAdapter(local_param);
   }
 
-  DynamicNumericDiffCostFunction<CostFunction, RIDDERS>*
-      finite_diff_cost_function =
-          new DynamicNumericDiffCostFunction<CostFunction, RIDDERS>(
-              function, DO_NOT_TAKE_OWNERSHIP, options);
-  finite_diff_cost_function_.reset(finite_diff_cost_function);
-
+  auto finite_diff_cost_function =
+      std::make_unique<DynamicNumericDiffCostFunction<CostFunction, RIDDERS>>(
+          function, DO_NOT_TAKE_OWNERSHIP, options);
   const vector<int32_t>& parameter_block_sizes =
       function->parameter_block_sizes();
   for (int32_t parameter_block_size : parameter_block_sizes) {
     finite_diff_cost_function->AddParameterBlock(parameter_block_size);
   }
   finite_diff_cost_function->SetNumResiduals(function->num_residuals());
+
+  finite_diff_cost_function_ = std::move(finite_diff_cost_function);
 }
 
 GradientChecker::GradientChecker(const CostFunction* function,
@@ -158,12 +157,10 @@
   } else {
     manifolds_.resize(function->parameter_block_sizes().size(), nullptr);
   }
-  DynamicNumericDiffCostFunction<CostFunction, RIDDERS>*
-      finite_diff_cost_function =
-          new DynamicNumericDiffCostFunction<CostFunction, RIDDERS>(
-              function, DO_NOT_TAKE_OWNERSHIP, options);
-  finite_diff_cost_function_.reset(finite_diff_cost_function);
 
+  auto finite_diff_cost_function =
+      std::make_unique<DynamicNumericDiffCostFunction<CostFunction, RIDDERS>>(
+          function, DO_NOT_TAKE_OWNERSHIP, options);
   const vector<int32_t>& parameter_block_sizes =
       function->parameter_block_sizes();
   const int num_parameter_blocks = parameter_block_sizes.size();
@@ -171,6 +168,8 @@
     finite_diff_cost_function->AddParameterBlock(parameter_block_sizes[i]);
   }
   finite_diff_cost_function->SetNumResiduals(function->num_residuals());
+
+  finite_diff_cost_function_ = std::move(finite_diff_cost_function);
 }
 
 GradientChecker::~GradientChecker() {
diff --git a/internal/ceres/gradient_checking_cost_function.cc b/internal/ceres/gradient_checking_cost_function.cc
index f786b83..bbe68b8 100644
--- a/internal/ceres/gradient_checking_cost_function.cc
+++ b/internal/ceres/gradient_checking_cost_function.cc
@@ -87,7 +87,7 @@
                 double** jacobians) const final {
     if (!jacobians) {
       // Nothing to check in this case; just forward.
-      return function_->Evaluate(parameters, residuals, NULL);
+      return function_->Evaluate(parameters, residuals, nullptr);
     }
 
     GradientChecker::ProbeResults results;
@@ -107,7 +107,7 @@
     // Copy the original jacobian blocks into the jacobians array.
     const vector<int32_t>& block_sizes = function_->parameter_block_sizes();
     for (int k = 0; k < block_sizes.size(); k++) {
-      if (jacobians[k] != NULL) {
+      if (jacobians[k] != nullptr) {
         MatrixRef(jacobians[k],
                   results.jacobians[k].rows(),
                   results.jacobians[k].cols()) = results.jacobians[k];
@@ -169,7 +169,7 @@
                                           callback);
 }
 
-ProblemImpl* CreateGradientCheckingProblemImpl(
+std::unique_ptr<ProblemImpl> CreateGradientCheckingProblemImpl(
     ProblemImpl* problem_impl,
     double relative_step_size,
     double relative_precision,
@@ -190,8 +190,8 @@
   NumericDiffOptions numeric_diff_options;
   numeric_diff_options.relative_step_size = relative_step_size;
 
-  ProblemImpl* gradient_checking_problem_impl =
-      new ProblemImpl(gradient_checking_problem_options);
+  auto gradient_checking_problem_impl =
+      std::make_unique<ProblemImpl>(gradient_checking_problem_options);
 
   Program* program = problem_impl->mutable_program();
 
diff --git a/internal/ceres/gradient_checking_cost_function.h b/internal/ceres/gradient_checking_cost_function.h
index 46cf4dd..b21d113 100644
--- a/internal/ceres/gradient_checking_cost_function.h
+++ b/internal/ceres/gradient_checking_cost_function.h
@@ -92,8 +92,6 @@
 // iteration, the respective cost function will notify the
 // GradientCheckingIterationCallback.
 //
-// The caller owns the returned ProblemImpl object.
-//
 // Note: This is quite inefficient and is intended only for debugging.
 //
 // relative_step_size and relative_precision are parameters to control
@@ -102,11 +100,11 @@
 // jacobians obtained by numerically differentiating them. See the
 // documentation of 'numeric_derivative_relative_step_size' in solver.h for a
 // better explanation.
-CERES_EXPORT_INTERNAL ProblemImpl* CreateGradientCheckingProblemImpl(
-    ProblemImpl* problem_impl,
-    double relative_step_size,
-    double relative_precision,
-    GradientCheckingIterationCallback* callback);
+CERES_EXPORT_INTERNAL std::unique_ptr<ProblemImpl>
+CreateGradientCheckingProblemImpl(ProblemImpl* problem_impl,
+                                  double relative_step_size,
+                                  double relative_precision,
+                                  GradientCheckingIterationCallback* callback);
 
 }  // namespace internal
 }  // namespace ceres
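The header above wires gradient checking into a cloned `ProblemImpl`; from user code this machinery is normally reached through `Solver::Options` rather than by calling `CreateGradientCheckingProblemImpl` directly. A short sketch, assuming the standard public solver options (`check_gradients` plus the two gradient-check tolerances) and a `problem` that already has residual blocks:

```c++
#include "ceres/ceres.h"

// Hedged sketch: turning on gradient checking from the public API. The two
// tolerance options are assumed to map onto the relative_step_size and
// relative_precision parameters described above.
void SolveWithGradientChecking(ceres::Problem* problem) {
  ceres::Solver::Options options;
  options.check_gradients = true;
  options.gradient_check_numeric_derivative_relative_step_size = 1e-6;
  options.gradient_check_relative_precision = 1e-8;

  ceres::Solver::Summary summary;
  ceres::Solve(options, problem, &summary);
}
```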
diff --git a/internal/ceres/gradient_checking_cost_function_test.cc b/internal/ceres/gradient_checking_cost_function_test.cc
index 264a858..5630c32 100644
--- a/internal/ceres/gradient_checking_cost_function_test.cc
+++ b/internal/ceres/gradient_checking_cost_function_test.cc
@@ -366,8 +366,8 @@
   // clang-format on
 
   GradientCheckingIterationCallback callback;
-  std::unique_ptr<ProblemImpl> gradient_checking_problem_impl(
-      CreateGradientCheckingProblemImpl(&problem_impl, 1.0, 1.0, &callback));
+  std::unique_ptr<ProblemImpl> gradient_checking_problem_impl =
+      CreateGradientCheckingProblemImpl(&problem_impl, 1.0, 1.0, &callback);
 
   // The dimensions of the two problems match.
   EXPECT_EQ(problem_impl.NumParameterBlocks(),
@@ -444,8 +444,8 @@
   // clang-format on
 
   GradientCheckingIterationCallback callback;
-  std::unique_ptr<ProblemImpl> gradient_checking_problem_impl(
-      CreateGradientCheckingProblemImpl(&problem_impl, 1.0, 1.0, &callback));
+  std::unique_ptr<ProblemImpl> gradient_checking_problem_impl =
+      CreateGradientCheckingProblemImpl(&problem_impl, 1.0, 1.0, &callback);
 
   // The dimensions of the two problems match.
   EXPECT_EQ(problem_impl.NumParameterBlocks(),
@@ -505,8 +505,8 @@
   problem_impl.SetParameterUpperBound(x, 1, 2.5);
 
   GradientCheckingIterationCallback callback;
-  std::unique_ptr<ProblemImpl> gradient_checking_problem_impl(
-      CreateGradientCheckingProblemImpl(&problem_impl, 1.0, 1.0, &callback));
+  std::unique_ptr<ProblemImpl> gradient_checking_problem_impl =
+      CreateGradientCheckingProblemImpl(&problem_impl, 1.0, 1.0, &callback);
 
   // The dimensions of the two problems match.
   EXPECT_EQ(problem_impl.NumParameterBlocks(),
diff --git a/internal/ceres/gradient_problem.cc b/internal/ceres/gradient_problem.cc
index 3ef8e4f..93b18f5 100644
--- a/internal/ceres/gradient_problem.cc
+++ b/internal/ceres/gradient_problem.cc
@@ -53,9 +53,10 @@
       scratch_(new double[function_->NumParameters()]) {
   CHECK(function != nullptr);
   if (parameterization != nullptr) {
-    manifold_.reset(new internal::ManifoldAdapter(parameterization_.get()));
+    manifold_ =
+        std::make_unique<internal::ManifoldAdapter>(parameterization_.get());
   } else {
-    manifold_.reset(new EuclideanManifold(function_->NumParameters()));
+    manifold_ = std::make_unique<EuclideanManifold>(function_->NumParameters());
   }
   CHECK_EQ(function_->NumParameters(), manifold_->AmbientSize());
 }
diff --git a/internal/ceres/gradient_problem_evaluator.h b/internal/ceres/gradient_problem_evaluator.h
index a510b25..f0aba94 100644
--- a/internal/ceres/gradient_problem_evaluator.h
+++ b/internal/ceres/gradient_problem_evaluator.h
@@ -32,12 +32,14 @@
 #define CERES_INTERNAL_GRADIENT_PROBLEM_EVALUATOR_H_
 
 #include <map>
+#include <memory>
 #include <string>
 
 #include "ceres/evaluator.h"
 #include "ceres/execution_summary.h"
 #include "ceres/gradient_problem.h"
 #include "ceres/internal/port.h"
+#include "ceres/sparse_matrix.h"
 #include "ceres/wall_time.h"
 
 namespace ceres {
@@ -47,15 +49,18 @@
  public:
   explicit GradientProblemEvaluator(const GradientProblem& problem)
       : problem_(problem) {}
+
   ~GradientProblemEvaluator() override = default;
-  SparseMatrix* CreateJacobian() const final { return nullptr; }
+
+  std::unique_ptr<SparseMatrix> CreateJacobian() const final { return nullptr; }
+
   bool Evaluate(const EvaluateOptions& evaluate_options,
                 const double* state,
                 double* cost,
                 double* residuals,
                 double* gradient,
                 SparseMatrix* jacobian) final {
-    CHECK(jacobian == NULL);
+    CHECK(jacobian == nullptr);
     ScopedExecutionTimer total_timer("Evaluator::Total", &execution_summary_);
     // The reason we use Residual and Jacobian here even when we are
     // only computing the cost and gradient has to do with the fact
@@ -65,7 +70,7 @@
     // to be consistent across the code base for the time accounting
     // to work.
     ScopedExecutionTimer call_type_timer(
-        gradient == NULL ? "Evaluator::Residual" : "Evaluator::Jacobian",
+        gradient == nullptr ? "Evaluator::Residual" : "Evaluator::Jacobian",
         &execution_summary_);
     return problem_.Evaluate(state, cost, gradient);
   }
diff --git a/internal/ceres/gradient_problem_solver.cc b/internal/ceres/gradient_problem_solver.cc
index f754274..582895d 100644
--- a/internal/ceres/gradient_problem_solver.cc
+++ b/internal/ceres/gradient_problem_solver.cc
@@ -135,21 +135,22 @@
   // now.
   Minimizer::Options minimizer_options =
       Minimizer::Options(GradientProblemSolverOptionsToSolverOptions(options));
-  minimizer_options.evaluator.reset(new GradientProblemEvaluator(problem));
+  minimizer_options.evaluator =
+      std::make_unique<GradientProblemEvaluator>(problem);
 
   std::unique_ptr<IterationCallback> logging_callback;
   if (options.logging_type != SILENT) {
-    logging_callback.reset(
-        new LoggingCallback(LINE_SEARCH, options.minimizer_progress_to_stdout));
+    logging_callback = std::make_unique<LoggingCallback>(
+        LINE_SEARCH, options.minimizer_progress_to_stdout);
     minimizer_options.callbacks.insert(minimizer_options.callbacks.begin(),
                                        logging_callback.get());
   }
 
   std::unique_ptr<IterationCallback> state_updating_callback;
   if (options.update_state_every_iteration) {
-    state_updating_callback.reset(
-        new GradientProblemSolverStateUpdatingCallback(
-            problem.NumParameters(), solution.data(), parameters_ptr));
+    state_updating_callback =
+        std::make_unique<GradientProblemSolverStateUpdatingCallback>(
+            problem.NumParameters(), solution.data(), parameters_ptr);
     minimizer_options.callbacks.insert(minimizer_options.callbacks.begin(),
                                        state_updating_callback.get());
   }
diff --git a/internal/ceres/gradient_problem_solver_test.cc b/internal/ceres/gradient_problem_solver_test.cc
index e3b4085..5e33b31 100644
--- a/internal/ceres/gradient_problem_solver_test.cc
+++ b/internal/ceres/gradient_problem_solver_test.cc
@@ -48,7 +48,7 @@
     const double y = parameters[1];
 
     cost[0] = (1.0 - x) * (1.0 - x) + 100.0 * (y - x * x) * (y - x * x);
-    if (gradient != NULL) {
+    if (gradient != nullptr) {
       gradient[0] = -2.0 * (1.0 - x) - 200.0 * (y - x * x) * 2.0 * x;
       gradient[1] = 200.0 * (y - x * x);
     }
@@ -79,7 +79,7 @@
                 double* gradient) const final {
     const double x = parameters[0];
     *cost = 0.5 * (5.0 - x) * (5.0 - x);
-    if (gradient != NULL) {
+    if (gradient != nullptr) {
       gradient[0] = x - 5.0;
     }
 
diff --git a/internal/ceres/implicit_schur_complement.cc b/internal/ceres/implicit_schur_complement.cc
index bb2da17..4e712b9 100644
--- a/internal/ceres/implicit_schur_complement.cc
+++ b/internal/ceres/implicit_schur_complement.cc
@@ -43,7 +43,7 @@
 
 ImplicitSchurComplement::ImplicitSchurComplement(
     const LinearSolver::Options& options)
-    : options_(options), D_(NULL), b_(NULL) {}
+    : options_(options), D_(nullptr), b_(nullptr) {}
 
 ImplicitSchurComplement::~ImplicitSchurComplement() = default;
 
@@ -52,8 +52,8 @@
                                    const double* b) {
   // Since initialization is reasonably heavy, perhaps we can save on
   // constructing a new object every time.
-  if (A_ == NULL) {
-    A_.reset(PartitionedMatrixViewBase::Create(options_, A));
+  if (A_ == nullptr) {
+    A_ = PartitionedMatrixViewBase::Create(options_, A);
   }
 
   D_ = D;
@@ -61,10 +61,10 @@
 
   // Initialize temporary storage and compute the block diagonals of
   // E'E and F'E.
-  if (block_diagonal_EtE_inverse_ == NULL) {
-    block_diagonal_EtE_inverse_.reset(A_->CreateBlockDiagonalEtE());
+  if (block_diagonal_EtE_inverse_ == nullptr) {
+    block_diagonal_EtE_inverse_ = A_->CreateBlockDiagonalEtE();
     if (options_.preconditioner_type == JACOBI) {
-      block_diagonal_FtF_inverse_.reset(A_->CreateBlockDiagonalFtF());
+      block_diagonal_FtF_inverse_ = A_->CreateBlockDiagonalFtF();
     }
     rhs_.resize(A_->num_cols_f());
     rhs_.setZero();
@@ -84,7 +84,7 @@
   // the block diagonals and invert them.
   AddDiagonalAndInvert(D_, block_diagonal_EtE_inverse_.get());
   if (options_.preconditioner_type == JACOBI) {
-    AddDiagonalAndInvert((D_ == NULL) ? NULL : D_ + A_->num_cols_e(),
+    AddDiagonalAndInvert((D_ == nullptr) ? nullptr : D_ + A_->num_cols_e(),
                          block_diagonal_FtF_inverse_.get());
   }
 
@@ -118,7 +118,7 @@
   A_->RightMultiplyE(tmp_e_cols_2_.data(), tmp_rows_.data());
 
   // y5 = D * x
-  if (D_ != NULL) {
+  if (D_ != nullptr) {
     ConstVectorRef Dref(D_ + A_->num_cols_e(), num_cols());
     VectorRef(y, num_cols()) =
         (Dref.array().square() * ConstVectorRef(x, num_cols()).array())
@@ -146,7 +146,7 @@
                 row_block_size,
                 row_block_size);
 
-    if (D != NULL) {
+    if (D != nullptr) {
       ConstVectorRef d(D + row_block_pos, row_block_size);
       m += d.array().square().matrix().asDiagonal();
     }
diff --git a/internal/ceres/implicit_schur_complement_test.cc b/internal/ceres/implicit_schur_complement_test.cc
index b6d886f..b4f618c 100644
--- a/internal/ceres/implicit_schur_complement_test.cc
+++ b/internal/ceres/implicit_schur_complement_test.cc
@@ -57,13 +57,13 @@
 class ImplicitSchurComplementTest : public ::testing::Test {
  protected:
   void SetUp() final {
-    std::unique_ptr<LinearLeastSquaresProblem> problem(
-        CreateLinearLeastSquaresProblemFromId(2));
+    std::unique_ptr<LinearLeastSquaresProblem> problem =
+        CreateLinearLeastSquaresProblemFromId(2);
 
     CHECK(problem != nullptr);
     A_.reset(down_cast<BlockSparseMatrix*>(problem->A.release()));
-    b_.reset(problem->b.release());
-    D_.reset(problem->D.release());
+    b_ = std::move(problem->b);
+    D_ = std::move(problem->D);
 
     num_cols_ = A_->num_cols();
     num_rows_ = A_->num_rows();
@@ -90,8 +90,8 @@
     ContextImpl context;
     options.context = &context;
 
-    std::unique_ptr<SchurEliminatorBase> eliminator(
-        SchurEliminatorBase::Create(options));
+    std::unique_ptr<SchurEliminatorBase> eliminator =
+        SchurEliminatorBase::Create(options);
     CHECK(eliminator != nullptr);
     const bool kFullRankETE = true;
     eliminator->Init(num_eliminate_blocks_, kFullRankETE, bs);
@@ -202,7 +202,7 @@
 // We do this with and without regularization to check that the
 // support for the LM diagonal is correct.
 TEST_F(ImplicitSchurComplementTest, SchurMatrixValuesTest) {
-  EXPECT_TRUE(TestImplicitSchurComplement(NULL));
+  EXPECT_TRUE(TestImplicitSchurComplement(nullptr));
   EXPECT_TRUE(TestImplicitSchurComplement(D_.get()));
 }
 
diff --git a/internal/ceres/inner_product_computer.cc b/internal/ceres/inner_product_computer.cc
index ef38b7b..1a3a1a1 100644
--- a/internal/ceres/inner_product_computer.cc
+++ b/internal/ceres/inner_product_computer.cc
@@ -44,11 +44,12 @@
 // or the lower triangular part of the product.
 //
 // num_nonzeros is the number of non-zeros in the result matrix.
-CompressedRowSparseMatrix* InnerProductComputer::CreateResultMatrix(
+std::unique_ptr<CompressedRowSparseMatrix>
+InnerProductComputer::CreateResultMatrix(
     const CompressedRowSparseMatrix::StorageType storage_type,
     const int num_nonzeros) {
-  CompressedRowSparseMatrix* matrix =
-      new CompressedRowSparseMatrix(m_.num_cols(), m_.num_cols(), num_nonzeros);
+  auto matrix = std::make_unique<CompressedRowSparseMatrix>(
+      m_.num_cols(), m_.num_cols(), num_nonzeros);
   matrix->set_storage_type(storage_type);
 
   const CompressedRowBlockStructure* bs = m_.block_structure();
@@ -116,14 +117,14 @@
 //
 // product_storage_type controls the form of the output matrix. It
 // can be LOWER_TRIANGULAR or UPPER_TRIANGULAR.
-InnerProductComputer* InnerProductComputer::Create(
+std::unique_ptr<InnerProductComputer> InnerProductComputer::Create(
     const BlockSparseMatrix& m,
     CompressedRowSparseMatrix::StorageType product_storage_type) {
   return InnerProductComputer::Create(
       m, 0, m.block_structure()->rows.size(), product_storage_type);
 }
 
-InnerProductComputer* InnerProductComputer::Create(
+std::unique_ptr<InnerProductComputer> InnerProductComputer::Create(
     const BlockSparseMatrix& m,
     const int start_row_block,
     const int end_row_block,
@@ -132,8 +133,8 @@
         product_storage_type == CompressedRowSparseMatrix::UPPER_TRIANGULAR);
   CHECK_GT(m.num_nonzeros(), 0)
       << "Congratulations, you found a bug in Ceres. Please report it.";
-  InnerProductComputer* inner_product_computer =
-      new InnerProductComputer(m, start_row_block, end_row_block);
+  std::unique_ptr<InnerProductComputer> inner_product_computer(
+      new InnerProductComputer(m, start_row_block, end_row_block));
   inner_product_computer->Init(product_storage_type);
   return inner_product_computer;
 }
@@ -183,7 +184,7 @@
   std::vector<int> row_block_nnz;
   const int num_nonzeros = ComputeNonzeros(product_terms, &row_block_nnz);
 
-  result_.reset(CreateResultMatrix(product_storage_type, num_nonzeros));
+  result_ = CreateResultMatrix(product_storage_type, num_nonzeros);
 
   // Populate the row non-zero counts in the result matrix.
   int* crsm_rows = result_->mutable_rows();
diff --git a/internal/ceres/inner_product_computer.h b/internal/ceres/inner_product_computer.h
index 04ec1d1..abf81be 100644
--- a/internal/ceres/inner_product_computer.h
+++ b/internal/ceres/inner_product_computer.h
@@ -74,7 +74,7 @@
   //
   // The user must ensure that the matrix m is valid for the life time
   // of this object.
-  static InnerProductComputer* Create(
+  static std::unique_ptr<InnerProductComputer> Create(
       const BlockSparseMatrix& m,
       CompressedRowSparseMatrix::StorageType storage_type);
 
@@ -83,7 +83,7 @@
   //
   // a = m(start_row_block : end_row_block, :);
   // result = a' * a;
-  static InnerProductComputer* Create(
+  static std::unique_ptr<InnerProductComputer> Create(
       const BlockSparseMatrix& m,
       int start_row_block,
       int end_row_block,
@@ -127,7 +127,7 @@
 
   void Init(CompressedRowSparseMatrix::StorageType storage_type);
 
-  CompressedRowSparseMatrix* CreateResultMatrix(
+  std::unique_ptr<CompressedRowSparseMatrix> CreateResultMatrix(
       const CompressedRowSparseMatrix::StorageType storage_type,
       int num_nonzeros);
 
diff --git a/internal/ceres/inner_product_computer_test.cc b/internal/ceres/inner_product_computer_test.cc
index f65c8a3..b672f8c 100644
--- a/internal/ceres/inner_product_computer_test.cc
+++ b/internal/ceres/inner_product_computer_test.cc
@@ -128,11 +128,11 @@
 
         std::unique_ptr<InnerProductComputer> inner_product_computer;
 
-        inner_product_computer.reset(InnerProductComputer::Create(
-            *random_matrix, CompressedRowSparseMatrix::LOWER_TRIANGULAR));
+        inner_product_computer = InnerProductComputer::Create(
+            *random_matrix, CompressedRowSparseMatrix::LOWER_TRIANGULAR);
         COMPUTE_AND_COMPARE;
-        inner_product_computer.reset(InnerProductComputer::Create(
-            *random_matrix, CompressedRowSparseMatrix::UPPER_TRIANGULAR));
+        inner_product_computer = InnerProductComputer::Create(
+            *random_matrix, CompressedRowSparseMatrix::UPPER_TRIANGULAR);
         COMPUTE_AND_COMPARE;
       }
     }
@@ -202,17 +202,17 @@
             eigen_random_matrix.transpose() * eigen_random_matrix;
 
         std::unique_ptr<InnerProductComputer> inner_product_computer;
-        inner_product_computer.reset(InnerProductComputer::Create(
+        inner_product_computer = InnerProductComputer::Create(
             *random_matrix,
             start_row_block,
             end_row_block,
-            CompressedRowSparseMatrix::LOWER_TRIANGULAR));
+            CompressedRowSparseMatrix::LOWER_TRIANGULAR);
         COMPUTE_AND_COMPARE;
-        inner_product_computer.reset(InnerProductComputer::Create(
+        inner_product_computer = InnerProductComputer::Create(
             *random_matrix,
             start_row_block,
             end_row_block,
-            CompressedRowSparseMatrix::UPPER_TRIANGULAR));
+            CompressedRowSparseMatrix::UPPER_TRIANGULAR);
         COMPUTE_AND_COMPARE;
       }
     }
diff --git a/internal/ceres/is_close.h b/internal/ceres/is_close.h
index b781a44..82dc7e9 100644
--- a/internal/ceres/is_close.h
+++ b/internal/ceres/is_close.h
@@ -39,7 +39,7 @@
 namespace internal {
 // Returns true if x and y have a relative (unsigned) difference less than
 // relative_precision and false otherwise. Stores the relative and absolute
-// difference in relative/absolute_error if non-NULL. If one of the two values
+// difference in relative/absolute_error if non-null. If one of the two values
 // is exactly zero, the absolute difference will be compared, and relative_error
 // will be set to the absolute difference.
 CERES_EXPORT_INTERNAL bool IsClose(double x,
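
A short usage sketch of the contract documented above. The parameters after `x` (the second value, the relative precision, and the two optional output pointers) follow the comment; the rest of the declaration is not shown here, so treat the exact ordering as an assumption. The wrapper function is illustrative only.

```c++
// Sketch only: parameter ordering after `x` is assumed from the comment above.
#include "ceres/is_close.h"

void CheckAgreement(double x, double y) {
  double relative_error = 0.0;
  double absolute_error = 0.0;
  // Either output pointer may be nullptr if the caller does not need it.
  if (ceres::internal::IsClose(x, y, /* relative_precision = */ 1e-12,
                               &relative_error, &absolute_error)) {
    // x and y agree to within the requested relative precision.
  }
}
```
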
diff --git a/internal/ceres/iterative_schur_complement_solver.cc b/internal/ceres/iterative_schur_complement_solver.cc
index e6340df..6ff84b5 100644
--- a/internal/ceres/iterative_schur_complement_solver.cc
+++ b/internal/ceres/iterative_schur_complement_solver.cc
@@ -69,13 +69,13 @@
   CHECK(A->block_structure() != nullptr);
   const int num_eliminate_blocks = options_.elimination_groups[0];
   // Initialize a ImplicitSchurComplement object.
-  if (schur_complement_ == NULL) {
+  if (schur_complement_ == nullptr) {
     DetectStructure(*(A->block_structure()),
                     num_eliminate_blocks,
                     &options_.row_block_size,
                     &options_.e_block_size,
                     &options_.f_block_size);
-    schur_complement_.reset(new ImplicitSchurComplement(options_));
+    schur_complement_ = std::make_unique<ImplicitSchurComplement>(options_);
   }
   schur_complement_->Init(*A, per_solve_options.D, b);
 
@@ -86,7 +86,7 @@
     LinearSolver::Summary summary;
     summary.num_iterations = 0;
     summary.termination_type = LINEAR_SOLVER_SUCCESS;
-    schur_complement_->BackSubstitute(NULL, x);
+    schur_complement_->BackSubstitute(nullptr, x);
     return summary;
   }
 
@@ -104,7 +104,7 @@
   cg_per_solve_options.q_tolerance = per_solve_options.q_tolerance;
 
   CreatePreconditioner(A);
-  if (preconditioner_.get() != NULL) {
+  if (preconditioner_.get() != nullptr) {
     if (!preconditioner_->Update(*A, per_solve_options.D)) {
       LinearSolver::Summary summary;
       summary.num_iterations = 0;
@@ -134,7 +134,7 @@
 void IterativeSchurComplementSolver::CreatePreconditioner(
     BlockSparseMatrix* A) {
   if (options_.preconditioner_type == IDENTITY ||
-      preconditioner_.get() != NULL) {
+      preconditioner_.get() != nullptr) {
     return;
   }
 
@@ -149,22 +149,22 @@
   preconditioner_options.e_block_size = options_.e_block_size;
   preconditioner_options.f_block_size = options_.f_block_size;
   preconditioner_options.elimination_groups = options_.elimination_groups;
-  CHECK(options_.context != NULL);
+  CHECK(options_.context != nullptr);
   preconditioner_options.context = options_.context;
 
   switch (options_.preconditioner_type) {
     case JACOBI:
-      preconditioner_.reset(new SparseMatrixPreconditionerWrapper(
-          schur_complement_->block_diagonal_FtF_inverse()));
+      preconditioner_ = std::make_unique<SparseMatrixPreconditionerWrapper>(
+          schur_complement_->block_diagonal_FtF_inverse());
       break;
     case SCHUR_JACOBI:
-      preconditioner_.reset(new SchurJacobiPreconditioner(
-          *A->block_structure(), preconditioner_options));
+      preconditioner_ = std::make_unique<SchurJacobiPreconditioner>(
+          *A->block_structure(), preconditioner_options);
       break;
     case CLUSTER_JACOBI:
     case CLUSTER_TRIDIAGONAL:
-      preconditioner_.reset(new VisibilityBasedPreconditioner(
-          *A->block_structure(), preconditioner_options));
+      preconditioner_ = std::make_unique<VisibilityBasedPreconditioner>(
+          *A->block_structure(), preconditioner_options);
       break;
     default:
       LOG(FATAL) << "Unknown Preconditioner Type";
diff --git a/internal/ceres/iterative_schur_complement_solver_test.cc b/internal/ceres/iterative_schur_complement_solver_test.cc
index fdd65c7..80e388a 100644
--- a/internal/ceres/iterative_schur_complement_solver_test.cc
+++ b/internal/ceres/iterative_schur_complement_solver_test.cc
@@ -61,13 +61,13 @@
 class IterativeSchurComplementSolverTest : public ::testing::Test {
  protected:
   void SetUpProblem(int problem_id) {
-    std::unique_ptr<LinearLeastSquaresProblem> problem(
-        CreateLinearLeastSquaresProblemFromId(problem_id));
+    std::unique_ptr<LinearLeastSquaresProblem> problem =
+        CreateLinearLeastSquaresProblemFromId(problem_id);
 
     CHECK(problem != nullptr);
     A_.reset(down_cast<BlockSparseMatrix*>(problem->A.release()));
-    b_.reset(problem->b.release());
-    D_.reset(problem->D.release());
+    b_ = std::move(problem->b);
+    D_ = std::move(problem->D);
 
     num_cols_ = A_->num_cols();
     num_rows_ = A_->num_rows();
@@ -121,13 +121,13 @@
 
 TEST_F(IterativeSchurComplementSolverTest, NormalProblem) {
   SetUpProblem(2);
-  EXPECT_TRUE(TestSolver(NULL));
+  EXPECT_TRUE(TestSolver(nullptr));
   EXPECT_TRUE(TestSolver(D_.get()));
 }
 
 TEST_F(IterativeSchurComplementSolverTest, ProblemWithNoFBlocks) {
   SetUpProblem(3);
-  EXPECT_TRUE(TestSolver(NULL));
+  EXPECT_TRUE(TestSolver(nullptr));
   EXPECT_TRUE(TestSolver(D_.get()));
 }
 
diff --git a/internal/ceres/levenberg_marquardt_strategy_test.cc b/internal/ceres/levenberg_marquardt_strategy_test.cc
index c05dad1..dfb7302 100644
--- a/internal/ceres/levenberg_marquardt_strategy_test.cc
+++ b/internal/ceres/levenberg_marquardt_strategy_test.cc
@@ -87,7 +87,7 @@
 
   // We need a non-null pointer here, so anything should do.
   std::unique_ptr<LinearSolver> linear_solver(
-      new RegularizationCheckingLinearSolver(0, NULL));
+      new RegularizationCheckingLinearSolver(0, nullptr));
   options.linear_solver = linear_solver.get();
 
   LevenbergMarquardtStrategy lms(options);
diff --git a/internal/ceres/line_search.cc b/internal/ceres/line_search.cc
index 7e871a2..0141015 100644
--- a/internal/ceres/line_search.cc
+++ b/internal/ceres/line_search.cc
@@ -68,24 +68,21 @@
 LineSearch::LineSearch(const LineSearch::Options& options)
     : options_(options) {}
 
-LineSearch* LineSearch::Create(const LineSearchType line_search_type,
-                               const LineSearch::Options& options,
-                               string* error) {
-  LineSearch* line_search = NULL;
+std::unique_ptr<LineSearch> LineSearch::Create(
+    const LineSearchType line_search_type,
+    const LineSearch::Options& options,
+    string* error) {
   switch (line_search_type) {
     case ceres::ARMIJO:
-      line_search = new ArmijoLineSearch(options);
-      break;
+      return std::make_unique<ArmijoLineSearch>(options);
     case ceres::WOLFE:
-      line_search = new WolfeLineSearch(options);
-      break;
+      return std::make_unique<WolfeLineSearch>(options);
     default:
       *error = string("Invalid line search algorithm type: ") +
                LineSearchTypeToString(line_search_type) +
                string(", unable to create line search.");
-      return NULL;
   }
-  return line_search;
+  return nullptr;
 }
 
 LineSearchFunction::LineSearchFunction(Evaluator* evaluator)
@@ -119,13 +116,13 @@
   }
   output->vector_x_is_valid = true;
 
-  double* gradient = NULL;
+  double* gradient = nullptr;
   if (evaluate_gradient) {
     output->vector_gradient.resize(direction_.rows(), 1);
     gradient = output->vector_gradient.data();
   }
   const bool eval_status = evaluator_->Evaluate(
-      output->vector_x.data(), &(output->value), NULL, gradient, NULL);
+      output->vector_x.data(), &(output->value), nullptr, gradient, nullptr);
 
   if (!eval_status || !std::isfinite(output->value)) {
     return;
diff --git a/internal/ceres/line_search.h b/internal/ceres/line_search.h
index 0af4884..7d1a852 100644
--- a/internal/ceres/line_search.h
+++ b/internal/ceres/line_search.h
@@ -163,9 +163,10 @@
   explicit LineSearch(const LineSearch::Options& options);
   virtual ~LineSearch() = default;
 
-  static LineSearch* Create(const LineSearchType line_search_type,
-                            const LineSearch::Options& options,
-                            std::string* error);
+  static std::unique_ptr<LineSearch> Create(
+      const LineSearchType line_search_type,
+      const LineSearch::Options& options,
+      std::string* error);
 
   // Perform the line search.
   //
diff --git a/internal/ceres/line_search_direction.cc b/internal/ceres/line_search_direction.cc
index 2b707fb..4d6e641 100644
--- a/internal/ceres/line_search_direction.cc
+++ b/internal/ceres/line_search_direction.cc
@@ -338,32 +338,32 @@
   bool is_positive_definite_;
 };
 
-LineSearchDirection* LineSearchDirection::Create(
+std::unique_ptr<LineSearchDirection> LineSearchDirection::Create(
     const LineSearchDirection::Options& options) {
   if (options.type == STEEPEST_DESCENT) {
-    return new SteepestDescent;
+    return std::make_unique<SteepestDescent>();
   }
 
   if (options.type == NONLINEAR_CONJUGATE_GRADIENT) {
-    return new NonlinearConjugateGradient(
+    return std::make_unique<NonlinearConjugateGradient>(
         options.nonlinear_conjugate_gradient_type, options.function_tolerance);
   }
 
   if (options.type == ceres::LBFGS) {
-    return new ceres::internal::LBFGS(
+    return std::make_unique<ceres::internal::LBFGS>(
         options.num_parameters,
         options.max_lbfgs_rank,
         options.use_approximate_eigenvalue_bfgs_scaling);
   }
 
   if (options.type == ceres::BFGS) {
-    return new ceres::internal::BFGS(
+    return std::make_unique<ceres::internal::BFGS>(
         options.num_parameters,
         options.use_approximate_eigenvalue_bfgs_scaling);
   }
 
   LOG(ERROR) << "Unknown line search direction type: " << options.type;
-  return NULL;
+  return nullptr;
 }
 
 }  // namespace internal
diff --git a/internal/ceres/line_search_direction.h b/internal/ceres/line_search_direction.h
index e8b1139..29127c1 100644
--- a/internal/ceres/line_search_direction.h
+++ b/internal/ceres/line_search_direction.h
@@ -57,7 +57,7 @@
     bool use_approximate_eigenvalue_bfgs_scaling;
   };
 
-  static LineSearchDirection* Create(const Options& options);
+  static std::unique_ptr<LineSearchDirection> Create(const Options& options);
 
   virtual ~LineSearchDirection() = default;
   virtual bool NextDirection(const LineSearchMinimizer::State& previous,
diff --git a/internal/ceres/line_search_minimizer.cc b/internal/ceres/line_search_minimizer.cc
index ea1c507..6768d4b 100644
--- a/internal/ceres/line_search_minimizer.cc
+++ b/internal/ceres/line_search_minimizer.cc
@@ -171,8 +171,8 @@
   line_search_direction_options.max_lbfgs_rank = options.max_lbfgs_rank;
   line_search_direction_options.use_approximate_eigenvalue_bfgs_scaling =
       options.use_approximate_eigenvalue_bfgs_scaling;
-  std::unique_ptr<LineSearchDirection> line_search_direction(
-      LineSearchDirection::Create(line_search_direction_options));
+  std::unique_ptr<LineSearchDirection> line_search_direction =
+      LineSearchDirection::Create(line_search_direction_options);
 
   LineSearchFunction line_search_function(evaluator);
 
@@ -280,8 +280,8 @@
                      << options.max_num_line_search_direction_restarts
                      << " [max].";
       }
-      line_search_direction.reset(
-          LineSearchDirection::Create(line_search_direction_options));
+      line_search_direction =
+          LineSearchDirection::Create(line_search_direction_options);
       current_state.search_direction = -current_state.gradient;
     }
 
diff --git a/internal/ceres/line_search_minimizer_test.cc b/internal/ceres/line_search_minimizer_test.cc
index 2ef27b9..76a4a47 100644
--- a/internal/ceres/line_search_minimizer_test.cc
+++ b/internal/ceres/line_search_minimizer_test.cc
@@ -44,7 +44,7 @@
                 double* cost,
                 double* gradient) const final {
     cost[0] = parameters[0] * parameters[0];
-    if (gradient != NULL) {
+    if (gradient != nullptr) {
       gradient[0] = 2.0 * parameters[0];
     }
     return true;
diff --git a/internal/ceres/line_search_preprocessor.cc b/internal/ceres/line_search_preprocessor.cc
index 465d9a4..02a1a8c 100644
--- a/internal/ceres/line_search_preprocessor.cc
+++ b/internal/ceres/line_search_preprocessor.cc
@@ -63,9 +63,9 @@
   pp->evaluator_options.context = pp->problem->context();
   pp->evaluator_options.evaluation_callback =
       pp->reduced_program->mutable_evaluation_callback();
-  pp->evaluator.reset(Evaluator::Create(
-      pp->evaluator_options, pp->reduced_program.get(), &pp->error));
-  return (pp->evaluator.get() != NULL);
+  pp->evaluator = Evaluator::Create(
+      pp->evaluator_options, pp->reduced_program.get(), &pp->error);
+  return (pp->evaluator.get() != nullptr);
 }
 
 }  // namespace
@@ -85,10 +85,10 @@
     return false;
   }
 
-  pp->reduced_program.reset(program->CreateReducedProgram(
-      &pp->removed_parameter_blocks, &pp->fixed_cost, &pp->error));
+  pp->reduced_program = program->CreateReducedProgram(
+      &pp->removed_parameter_blocks, &pp->fixed_cost, &pp->error);
 
-  if (pp->reduced_program.get() == NULL) {
+  if (pp->reduced_program.get() == nullptr) {
     return false;
   }
 
diff --git a/internal/ceres/line_search_preprocessor_test.cc b/internal/ceres/line_search_preprocessor_test.cc
index b64946c..90cca10 100644
--- a/internal/ceres/line_search_preprocessor_test.cc
+++ b/internal/ceres/line_search_preprocessor_test.cc
@@ -85,7 +85,7 @@
 TEST(LineSearchPreprocessor, RemoveParameterBlocksFailed) {
   ProblemImpl problem;
   double x = 3.0;
-  problem.AddResidualBlock(new FailingCostFunction, NULL, &x);
+  problem.AddResidualBlock(new FailingCostFunction, nullptr, &x);
   problem.SetParameterBlockConstant(&x);
   Solver::Options options;
   options.minimizer_type = LINE_SEARCH;
@@ -121,8 +121,8 @@
   double x = 1.0;
   double y = 1.0;
   double z = 1.0;
-  problem.AddResidualBlock(new DummyCostFunction<1, 1, 1>, NULL, &x, &y);
-  problem.AddResidualBlock(new DummyCostFunction<1, 1, 1>, NULL, &y, &z);
+  problem.AddResidualBlock(new DummyCostFunction<1, 1, 1>, nullptr, &x, &y);
+  problem.AddResidualBlock(new DummyCostFunction<1, 1, 1>, nullptr, &y, &z);
 
   Solver::Options options;
   options.minimizer_type = LINE_SEARCH;
@@ -131,7 +131,7 @@
   PreprocessedProblem pp;
   EXPECT_TRUE(preprocessor.Preprocess(options, &problem, &pp));
   EXPECT_EQ(pp.evaluator_options.linear_solver_type, CGNR);
-  EXPECT_TRUE(pp.evaluator.get() != NULL);
+  EXPECT_TRUE(pp.evaluator.get() != nullptr);
 }
 
 }  // namespace internal
diff --git a/internal/ceres/linear_least_squares_problems.cc b/internal/ceres/linear_least_squares_problems.cc
index 73787d6..4b37e00 100644
--- a/internal/ceres/linear_least_squares_problems.cc
+++ b/internal/ceres/linear_least_squares_problems.cc
@@ -49,7 +49,8 @@
 
 using std::string;
 
-LinearLeastSquaresProblem* CreateLinearLeastSquaresProblemFromId(int id) {
+std::unique_ptr<LinearLeastSquaresProblem>
+CreateLinearLeastSquaresProblemFromId(int id) {
   switch (id) {
     case 0:
       return LinearLeastSquaresProblem0();
@@ -64,7 +65,7 @@
     default:
       LOG(FATAL) << "Unknown problem id requested " << id;
   }
-  return NULL;
+  return nullptr;
 }
 
 /*
@@ -85,15 +86,16 @@
 x_D = [1.78448275;
        2.82327586;]
  */
-LinearLeastSquaresProblem* LinearLeastSquaresProblem0() {
-  LinearLeastSquaresProblem* problem = new LinearLeastSquaresProblem;
+std::unique_ptr<LinearLeastSquaresProblem> LinearLeastSquaresProblem0() {
+  std::unique_ptr<LinearLeastSquaresProblem> problem =
+      std::make_unique<LinearLeastSquaresProblem>();
 
-  TripletSparseMatrix* A = new TripletSparseMatrix(3, 2, 6);
-  problem->b.reset(new double[3]);
-  problem->D.reset(new double[2]);
+  auto A = std::make_unique<TripletSparseMatrix>(3, 2, 6);
+  problem->b = std::make_unique<double[]>(3);
+  problem->D = std::make_unique<double[]>(2);
 
-  problem->x.reset(new double[2]);
-  problem->x_D.reset(new double[2]);
+  problem->x = std::make_unique<double[]>(2);
+  problem->x_D = std::make_unique<double[]>(2);
 
   int* Ai = A->mutable_rows();
   int* Aj = A->mutable_cols();
@@ -115,7 +117,7 @@
   Ax[4] = 6;
   Ax[5] = -10;
   A->set_num_nonzeros(6);
-  problem->A.reset(A);
+  problem->A = std::move(A);
 
   problem->b[0] = 8;
   problem->b[1] = 18;
@@ -181,15 +183,16 @@
 // BlockSparseMatrix version of this problem.
 
 // TripletSparseMatrix version.
-LinearLeastSquaresProblem* LinearLeastSquaresProblem1() {
+std::unique_ptr<LinearLeastSquaresProblem> LinearLeastSquaresProblem1() {
   int num_rows = 6;
   int num_cols = 5;
 
-  LinearLeastSquaresProblem* problem = new LinearLeastSquaresProblem;
-  TripletSparseMatrix* A =
-      new TripletSparseMatrix(num_rows, num_cols, num_rows * num_cols);
-  problem->b.reset(new double[num_rows]);
-  problem->D.reset(new double[num_cols]);
+  std::unique_ptr<LinearLeastSquaresProblem> problem =
+      std::make_unique<LinearLeastSquaresProblem>();
+  auto A = std::make_unique<TripletSparseMatrix>(
+      num_rows, num_cols, num_rows * num_cols);
+  problem->b = std::make_unique<double[]>(num_rows);
+  problem->D = std::make_unique<double[]>(num_cols);
   problem->num_eliminate_blocks = 2;
 
   int* rows = A->mutable_rows();
@@ -271,7 +274,7 @@
   A->set_num_nonzeros(nnz);
   CHECK(A->IsValid());
 
-  problem->A.reset(A);
+  problem->A = std::move(A);
 
   for (int i = 0; i < num_cols; ++i) {
     problem->D.get()[i] = 1;
@@ -285,18 +288,20 @@
 }
 
 // BlockSparseMatrix version
-LinearLeastSquaresProblem* LinearLeastSquaresProblem2() {
+std::unique_ptr<LinearLeastSquaresProblem> LinearLeastSquaresProblem2() {
   int num_rows = 6;
   int num_cols = 5;
 
-  LinearLeastSquaresProblem* problem = new LinearLeastSquaresProblem;
+  std::unique_ptr<LinearLeastSquaresProblem> problem =
+      std::make_unique<LinearLeastSquaresProblem>();
 
-  problem->b.reset(new double[num_rows]);
-  problem->D.reset(new double[num_cols]);
+  problem->b = std::make_unique<double[]>(num_rows);
+  problem->D = std::make_unique<double[]>(num_cols);
   problem->num_eliminate_blocks = 2;
 
   CompressedRowBlockStructure* bs = new CompressedRowBlockStructure;
-  std::unique_ptr<double[]> values(new double[num_rows * num_cols]);
+  std::unique_ptr<double[]> values =
+      std::make_unique<double[]>(num_rows * num_cols);
 
   for (int c = 0; c < num_cols; ++c) {
     bs->cols.push_back(Block());
@@ -386,7 +391,7 @@
     row.cells.push_back(Cell(4, 12));
   }
 
-  BlockSparseMatrix* A = new BlockSparseMatrix(bs);
+  auto A = std::make_unique<BlockSparseMatrix>(bs);
   memcpy(A->mutable_values(), values.get(), nnz * sizeof(*A->values()));
 
   for (int i = 0; i < num_cols; ++i) {
@@ -397,7 +402,7 @@
     problem->b.get()[i] = i;
   }
 
-  problem->A.reset(A);
+  problem->A = std::move(A);
 
   return problem;
 }
@@ -418,18 +423,20 @@
            5]
 */
 // BlockSparseMatrix version
-LinearLeastSquaresProblem* LinearLeastSquaresProblem3() {
+std::unique_ptr<LinearLeastSquaresProblem> LinearLeastSquaresProblem3() {
   int num_rows = 5;
   int num_cols = 2;
 
-  LinearLeastSquaresProblem* problem = new LinearLeastSquaresProblem;
+  std::unique_ptr<LinearLeastSquaresProblem> problem =
+      std::make_unique<LinearLeastSquaresProblem>();
 
-  problem->b.reset(new double[num_rows]);
-  problem->D.reset(new double[num_cols]);
+  problem->b = std::make_unique<double[]>(num_rows);
+  problem->D = std::make_unique<double[]>(num_cols);
   problem->num_eliminate_blocks = 2;
 
   CompressedRowBlockStructure* bs = new CompressedRowBlockStructure;
-  std::unique_ptr<double[]> values(new double[num_rows * num_cols]);
+  std::unique_ptr<double[]> values =
+      std::make_unique<double[]>(num_rows * num_cols);
 
   for (int c = 0; c < num_cols; ++c) {
     bs->cols.push_back(Block());
@@ -489,7 +496,7 @@
     row.cells.push_back(Cell(1, 4));
   }
 
-  BlockSparseMatrix* A = new BlockSparseMatrix(bs);
+  auto A = std::make_unique<BlockSparseMatrix>(bs);
   memcpy(A->mutable_values(), values.get(), nnz * sizeof(*A->values()));
 
   for (int i = 0; i < num_cols; ++i) {
@@ -500,7 +507,7 @@
     problem->b.get()[i] = i;
   }
 
-  problem->A.reset(A);
+  problem->A = std::move(A);
 
   return problem;
 }
@@ -525,18 +532,20 @@
 //
 // NOTE: This problem is too small and rank deficient to be solved without
 // the diagonal regularization.
-LinearLeastSquaresProblem* LinearLeastSquaresProblem4() {
+std::unique_ptr<LinearLeastSquaresProblem> LinearLeastSquaresProblem4() {
   int num_rows = 3;
   int num_cols = 7;
 
-  LinearLeastSquaresProblem* problem = new LinearLeastSquaresProblem;
+  std::unique_ptr<LinearLeastSquaresProblem> problem =
+      std::make_unique<LinearLeastSquaresProblem>();
 
-  problem->b.reset(new double[num_rows]);
-  problem->D.reset(new double[num_cols]);
+  problem->b = std::make_unique<double[]>(num_rows);
+  problem->D = std::make_unique<double[]>(num_cols);
   problem->num_eliminate_blocks = 1;
 
   CompressedRowBlockStructure* bs = new CompressedRowBlockStructure;
-  std::unique_ptr<double[]> values(new double[num_rows * num_cols]);
+  std::unique_ptr<double[]> values =
+      std::make_unique<double[]>(num_rows * num_cols);
 
   // Column block structure
   bs->cols.push_back(Block());
@@ -590,7 +599,7 @@
     values[nnz++] = 1;
   }
 
-  BlockSparseMatrix* A = new BlockSparseMatrix(bs);
+  auto A = std::make_unique<BlockSparseMatrix>(bs);
   memcpy(A->mutable_values(), values.get(), nnz * sizeof(*A->values()));
 
   for (int i = 0; i < num_cols; ++i) {
@@ -601,7 +610,7 @@
     problem->b.get()[i] = i;
   }
 
-  problem->A.reset(A);
+  problem->A = std::move(A);
   return problem;
 }
 
@@ -616,15 +625,15 @@
   A->ToDenseMatrix(&AA);
   LOG(INFO) << "A^T: \n" << AA.transpose();
 
-  if (D != NULL) {
+  if (D != nullptr) {
     LOG(INFO) << "A's appended diagonal:\n" << ConstVectorRef(D, A->num_cols());
   }
 
-  if (b != NULL) {
+  if (b != nullptr) {
     LOG(INFO) << "b: \n" << ConstVectorRef(b, A->num_rows());
   }
 
-  if (x != NULL) {
+  if (x != nullptr) {
     LOG(INFO) << "x: \n" << ConstVectorRef(x, A->num_cols());
   }
   return true;
@@ -673,21 +682,21 @@
         A->num_cols());
   }
 
-  if (D != NULL) {
+  if (D != nullptr) {
     string filename = filename_base + "_D.txt";
     WriteArrayToFileOrDie(filename, D, A->num_cols());
     StringAppendF(
         &matlab_script, "lsqp.D = load('%s', '-ascii');\n", filename.c_str());
   }
 
-  if (b != NULL) {
+  if (b != nullptr) {
     string filename = filename_base + "_b.txt";
     WriteArrayToFileOrDie(filename, b, A->num_rows());
     StringAppendF(
         &matlab_script, "lsqp.b = load('%s', '-ascii');\n", filename.c_str());
   }
 
-  if (x != NULL) {
+  if (x != nullptr) {
     string filename = filename_base + "_x.txt";
     WriteArrayToFileOrDie(filename, x, A->num_cols());
     StringAppendF(
diff --git a/internal/ceres/linear_least_squares_problems.h b/internal/ceres/linear_least_squares_problems.h
index cddaa9f..2120cc2 100644
--- a/internal/ceres/linear_least_squares_problems.h
+++ b/internal/ceres/linear_least_squares_problems.h
@@ -60,14 +60,14 @@
 };
 
 // Factories for linear least squares problem.
-CERES_EXPORT_INTERNAL LinearLeastSquaresProblem*
+CERES_EXPORT_INTERNAL std::unique_ptr<LinearLeastSquaresProblem>
 CreateLinearLeastSquaresProblemFromId(int id);
 
-LinearLeastSquaresProblem* LinearLeastSquaresProblem0();
-LinearLeastSquaresProblem* LinearLeastSquaresProblem1();
-LinearLeastSquaresProblem* LinearLeastSquaresProblem2();
-LinearLeastSquaresProblem* LinearLeastSquaresProblem3();
-LinearLeastSquaresProblem* LinearLeastSquaresProblem4();
+std::unique_ptr<LinearLeastSquaresProblem> LinearLeastSquaresProblem0();
+std::unique_ptr<LinearLeastSquaresProblem> LinearLeastSquaresProblem1();
+std::unique_ptr<LinearLeastSquaresProblem> LinearLeastSquaresProblem2();
+std::unique_ptr<LinearLeastSquaresProblem> LinearLeastSquaresProblem3();
+std::unique_ptr<LinearLeastSquaresProblem> LinearLeastSquaresProblem4();
 
 // Write the linear least squares problem to disk. The exact format
 // depends on dump_format_type.
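
As a usage sketch, the factory above can be consumed the same way the tests in this file set do. The wrapper function is illustrative, and the concrete type behind `problem->A` is assumed to be a SparseMatrix subclass, so only the smart pointers themselves are touched here.

```c++
// Sketch only: mirrors the test fixtures' use of the factory.
#include <memory>
#include <utility>

#include "ceres/linear_least_squares_problems.h"
#include "glog/logging.h"

void LoadProblemZero() {
  std::unique_ptr<ceres::internal::LinearLeastSquaresProblem> problem =
      ceres::internal::CreateLinearLeastSquaresProblemFromId(0);
  CHECK(problem != nullptr);
  // A, b, D, x and x_D are owned by the problem through std::unique_ptr,
  // so ownership can be transferred with std::move instead of release().
  auto A = std::move(problem->A);
  auto b = std::move(problem->b);
  (void)A;
  (void)b;
}
```
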
diff --git a/internal/ceres/linear_solver.cc b/internal/ceres/linear_solver.cc
index befaa14..cf358bd 100644
--- a/internal/ceres/linear_solver.cc
+++ b/internal/ceres/linear_solver.cc
@@ -69,50 +69,51 @@
   return linear_solver_type;
 }
 
-LinearSolver* LinearSolver::Create(const LinearSolver::Options& options) {
-  CHECK(options.context != NULL);
+std::unique_ptr<LinearSolver> LinearSolver::Create(
+    const LinearSolver::Options& options) {
+  CHECK(options.context != nullptr);
 
   switch (options.type) {
     case CGNR:
-      return new CgnrSolver(options);
+      return std::make_unique<CgnrSolver>(options);
 
     case SPARSE_NORMAL_CHOLESKY:
 #if defined(CERES_NO_SPARSE)
-      return NULL;
+      return nullptr;
 #else
       if (options.dynamic_sparsity) {
-        return new DynamicSparseNormalCholeskySolver(options);
+        return std::make_unique<DynamicSparseNormalCholeskySolver>(options);
       }
 
-      return new SparseNormalCholeskySolver(options);
+      return std::make_unique<SparseNormalCholeskySolver>(options);
 #endif
 
     case SPARSE_SCHUR:
 #if defined(CERES_NO_SPARSE)
-      return NULL;
+      return nullptr;
 #else
-      return new SparseSchurComplementSolver(options);
+      return std::make_unique<SparseSchurComplementSolver>(options);
 #endif
 
     case DENSE_SCHUR:
-      return new DenseSchurComplementSolver(options);
+      return std::make_unique<DenseSchurComplementSolver>(options);
 
     case ITERATIVE_SCHUR:
       if (options.use_explicit_schur_complement) {
-        return new SparseSchurComplementSolver(options);
+        return std::make_unique<SparseSchurComplementSolver>(options);
       } else {
-        return new IterativeSchurComplementSolver(options);
+        return std::make_unique<IterativeSchurComplementSolver>(options);
       }
 
     case DENSE_QR:
-      return new DenseQRSolver(options);
+      return std::make_unique<DenseQRSolver>(options);
 
     case DENSE_NORMAL_CHOLESKY:
-      return new DenseNormalCholeskySolver(options);
+      return std::make_unique<DenseNormalCholeskySolver>(options);
 
     default:
       LOG(FATAL) << "Unknown linear solver type :" << options.type;
-      return NULL;  // MSVC doesn't understand that LOG(FATAL) never returns.
+      return nullptr;  // MSVC doesn't understand that LOG(FATAL) never returns.
   }
 }
 
diff --git a/internal/ceres/linear_solver.h b/internal/ceres/linear_solver.h
index f4616cd..ea44f3b 100644
--- a/internal/ceres/linear_solver.h
+++ b/internal/ceres/linear_solver.h
@@ -288,7 +288,7 @@
   }
 
   // Factory
-  static LinearSolver* Create(const Options& options);
+  static std::unique_ptr<LinearSolver> Create(const Options& options);
 };
 
 // This templated subclass of LinearSolver serves as a base class for
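
For reference, a hedged sketch of the factory declared above; a ContextImpl is supplied because Create() CHECKs for a non-null context (see linear_solver.cc earlier in this change). The wrapper function and the DENSE_QR choice are illustrative.

```c++
// Sketch only: constructs a dense QR solver through the factory.
#include <memory>

#include "ceres/context_impl.h"
#include "ceres/linear_solver.h"
#include "glog/logging.h"

void CreateDenseQrSolver() {
  ceres::internal::ContextImpl context;
  ceres::internal::LinearSolver::Options options;
  options.type = ceres::DENSE_QR;
  options.context = &context;  // Required: Create() CHECKs this is non-null.
  std::unique_ptr<ceres::internal::LinearSolver> solver =
      ceres::internal::LinearSolver::Create(options);
  CHECK(solver != nullptr);
}
```
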
diff --git a/internal/ceres/local_parameterization_test.cc b/internal/ceres/local_parameterization_test.cc
index 2321ce7..cb2ad74 100644
--- a/internal/ceres/local_parameterization_test.cc
+++ b/internal/ceres/local_parameterization_test.cc
@@ -283,7 +283,7 @@
   double jacobian_ref[12];
   double zero_delta[kLocalSize] = {0.0, 0.0, 0.0};
   const double* parameters[2] = {x, zero_delta};
-  double* jacobian_array[2] = {NULL, jacobian_ref};
+  double* jacobian_array[2] = {nullptr, jacobian_ref};
 
   // Autodiff jacobian at delta_x = 0.
   internal::AutoDifferentiate<kGlobalSize,
@@ -779,27 +779,27 @@
     const int global_size1 = 5;
     std::vector<int> constant_parameters1;
     constant_parameters1.push_back(2);
-    param1_.reset(
-        new SubsetParameterization(global_size1, constant_parameters1));
+    param1_ = std::make_unique<SubsetParameterization>(global_size1,
+                                                       constant_parameters1);
 
     const int global_size2 = 3;
     std::vector<int> constant_parameters2;
     constant_parameters2.push_back(0);
     constant_parameters2.push_back(1);
-    param2_.reset(
-        new SubsetParameterization(global_size2, constant_parameters2));
+    param2_ = std::make_unique<SubsetParameterization>(global_size2,
+                                                       constant_parameters2);
 
     const int global_size3 = 4;
     std::vector<int> constant_parameters3;
     constant_parameters3.push_back(1);
-    param3_.reset(
-        new SubsetParameterization(global_size3, constant_parameters3));
+    param3_ = std::make_unique<SubsetParameterization>(global_size3,
+                                                       constant_parameters3);
 
     const int global_size4 = 2;
     std::vector<int> constant_parameters4;
     constant_parameters4.push_back(1);
-    param4_.reset(
-        new SubsetParameterization(global_size4, constant_parameters4));
+    param4_ = std::make_unique<SubsetParameterization>(global_size4,
+                                                       constant_parameters4);
   }
 
   std::unique_ptr<LocalParameterization> param1_;
diff --git a/internal/ceres/loss_function.cc b/internal/ceres/loss_function.cc
index 353f29a..f4ea0f7 100644
--- a/internal/ceres/loss_function.cc
+++ b/internal/ceres/loss_function.cc
@@ -161,7 +161,7 @@
 }
 
 void ScaledLoss::Evaluate(double s, double rho[3]) const {
-  if (rho_.get() == NULL) {
+  if (rho_.get() == nullptr) {
     rho[0] = a_ * s;
     rho[1] = a_;
     rho[2] = 0.0;
diff --git a/internal/ceres/loss_function_test.cc b/internal/ceres/loss_function_test.cc
index 638c0c9..e5de95f 100644
--- a/internal/ceres/loss_function_test.cc
+++ b/internal/ceres/loss_function_test.cc
@@ -195,7 +195,7 @@
   // construction with the call to AssertLossFunctionIsValid() because Apple's
   // GCC is unable to eliminate the copy of ScaledLoss, which is not copyable.
   {
-    ScaledLoss scaled_loss(NULL, 6, TAKE_OWNERSHIP);
+    ScaledLoss scaled_loss(nullptr, 6, TAKE_OWNERSHIP);
     AssertLossFunctionIsValid(scaled_loss, 0.323);
   }
   {
@@ -265,17 +265,17 @@
     EXPECT_NEAR(rho[i], rho_gold[i], 1e-12);
   }
 
-  // Set to NULL
+  // Set to nullptr
   TrivialLoss loss_function4;
-  loss_function_wrapper.Reset(NULL, TAKE_OWNERSHIP);
+  loss_function_wrapper.Reset(nullptr, TAKE_OWNERSHIP);
   loss_function_wrapper.Evaluate(s, rho);
   loss_function4.Evaluate(s, rho_gold);
   for (int i = 0; i < 3; ++i) {
     EXPECT_NEAR(rho[i], rho_gold[i], 1e-12);
   }
 
-  // Set to NULL, not taking ownership
-  loss_function_wrapper.Reset(NULL, DO_NOT_TAKE_OWNERSHIP);
+  // Set to nullptr, not taking ownership
+  loss_function_wrapper.Reset(nullptr, DO_NOT_TAKE_OWNERSHIP);
   loss_function_wrapper.Evaluate(s, rho);
   loss_function4.Evaluate(s, rho_gold);
   for (int i = 0; i < 3; ++i) {
diff --git a/internal/ceres/minimizer.cc b/internal/ceres/minimizer.cc
index 8e27c7b..8562968 100644
--- a/internal/ceres/minimizer.cc
+++ b/internal/ceres/minimizer.cc
@@ -48,7 +48,7 @@
   }
 
   LOG(FATAL) << "Unknown minimizer_type: " << minimizer_type;
-  return NULL;
+  return nullptr;
 }
 
 Minimizer::~Minimizer() = default;
diff --git a/internal/ceres/normal_prior.cc b/internal/ceres/normal_prior.cc
index 4a62132..473d05c 100644
--- a/internal/ceres/normal_prior.cc
+++ b/internal/ceres/normal_prior.cc
@@ -56,7 +56,7 @@
   // r = A_ * (p - b_);
   // The extra eval is to get around a bug in the eigen library.
   r = A_ * (p - b_).eval();
-  if ((jacobians != NULL) && (jacobians[0] != NULL)) {
+  if ((jacobians != nullptr) && (jacobians[0] != nullptr)) {
     MatrixRef(jacobians[0], num_residuals(), parameter_block_sizes()[0]) = A_;
   }
   return true;
diff --git a/internal/ceres/normal_prior_test.cc b/internal/ceres/normal_prior_test.cc
index 518c18e..39a7eb6 100644
--- a/internal/ceres/normal_prior_test.cc
+++ b/internal/ceres/normal_prior_test.cc
@@ -106,7 +106,7 @@
       for (int i = 0; i < num_cols; ++i) x[i] = 2 * RandDouble() - 1;
 
       double* jacobians[1];
-      jacobians[0] = NULL;
+      jacobians[0] = nullptr;
 
       Vector residuals(num_rows);
 
@@ -118,7 +118,7 @@
           (residuals - A * (VectorRef(x, num_cols) - b)).squaredNorm();
       EXPECT_NEAR(residual_diff_norm, 0, 1e-10);
 
-      prior.Evaluate(&x, residuals.data(), NULL);
+      prior.Evaluate(&x, residuals.data(), nullptr);
       // Compare the norm of the residual
       residual_diff_norm =
           (residuals - A * (VectorRef(x, num_cols) - b)).squaredNorm();
diff --git a/internal/ceres/numeric_diff_cost_function_test.cc b/internal/ceres/numeric_diff_cost_function_test.cc
index a5f7a15..b9a0beb 100644
--- a/internal/ceres/numeric_diff_cost_function_test.cc
+++ b/internal/ceres/numeric_diff_cost_function_test.cc
@@ -49,100 +49,102 @@
 namespace internal {
 
 TEST(NumericDiffCostFunction, EasyCaseFunctorCentralDifferences) {
-  std::unique_ptr<CostFunction> cost_function;
-  cost_function.reset(new NumericDiffCostFunction<EasyFunctor,
-                                                  CENTRAL,
-                                                  3,  // number of residuals
-                                                  5,  // size of x1
-                                                  5   // size of x2
-                                                  >(new EasyFunctor));
+  auto cost_function =
+      std::make_unique<NumericDiffCostFunction<EasyFunctor,
+                                               CENTRAL,
+                                               3,  // number of residuals
+                                               5,  // size of x1
+                                               5   // size of x2
+                                               >>(new EasyFunctor);
   EasyFunctor functor;
   functor.ExpectCostFunctionEvaluationIsNearlyCorrect(*cost_function, CENTRAL);
 }
 
 TEST(NumericDiffCostFunction, EasyCaseFunctorForwardDifferences) {
-  std::unique_ptr<CostFunction> cost_function;
-  cost_function.reset(new NumericDiffCostFunction<EasyFunctor,
-                                                  FORWARD,
-                                                  3,  // number of residuals
-                                                  5,  // size of x1
-                                                  5   // size of x2
-                                                  >(new EasyFunctor));
+  auto cost_function =
+      std::make_unique<NumericDiffCostFunction<EasyFunctor,
+                                               FORWARD,
+                                               3,  // number of residuals
+                                               5,  // size of x1
+                                               5   // size of x2
+                                               >>(new EasyFunctor);
   EasyFunctor functor;
   functor.ExpectCostFunctionEvaluationIsNearlyCorrect(*cost_function, FORWARD);
 }
 
 TEST(NumericDiffCostFunction, EasyCaseFunctorRidders) {
-  std::unique_ptr<CostFunction> cost_function;
-  cost_function.reset(new NumericDiffCostFunction<EasyFunctor,
-                                                  RIDDERS,
-                                                  3,  // number of residuals
-                                                  5,  // size of x1
-                                                  5   // size of x2
-                                                  >(new EasyFunctor));
+  auto cost_function =
+      std::make_unique<NumericDiffCostFunction<EasyFunctor,
+                                               RIDDERS,
+                                               3,  // number of residuals
+                                               5,  // size of x1
+                                               5   // size of x2
+                                               >>(new EasyFunctor);
   EasyFunctor functor;
   functor.ExpectCostFunctionEvaluationIsNearlyCorrect(*cost_function, RIDDERS);
 }
 
 TEST(NumericDiffCostFunction, EasyCaseCostFunctionCentralDifferences) {
-  std::unique_ptr<CostFunction> cost_function;
-  cost_function.reset(
-      new NumericDiffCostFunction<EasyCostFunction,
-                                  CENTRAL,
-                                  3,  // number of residuals
-                                  5,  // size of x1
-                                  5   // size of x2
-                                  >(new EasyCostFunction, TAKE_OWNERSHIP));
+  auto cost_function =
+      std::make_unique<NumericDiffCostFunction<EasyCostFunction,
+                                               CENTRAL,
+                                               3,  // number of residuals
+                                               5,  // size of x1
+                                               5   // size of x2
+                                               >>(new EasyCostFunction,
+                                                  TAKE_OWNERSHIP);
   EasyFunctor functor;
   functor.ExpectCostFunctionEvaluationIsNearlyCorrect(*cost_function, CENTRAL);
 }
 
 TEST(NumericDiffCostFunction, EasyCaseCostFunctionForwardDifferences) {
-  std::unique_ptr<CostFunction> cost_function;
-  cost_function.reset(
-      new NumericDiffCostFunction<EasyCostFunction,
-                                  FORWARD,
-                                  3,  // number of residuals
-                                  5,  // size of x1
-                                  5   // size of x2
-                                  >(new EasyCostFunction, TAKE_OWNERSHIP));
+  auto cost_function =
+      std::make_unique<NumericDiffCostFunction<EasyCostFunction,
+                                               FORWARD,
+                                               3,  // number of residuals
+                                               5,  // size of x1
+                                               5   // size of x2
+                                               >>(new EasyCostFunction,
+                                                  TAKE_OWNERSHIP);
   EasyFunctor functor;
   functor.ExpectCostFunctionEvaluationIsNearlyCorrect(*cost_function, FORWARD);
 }
 
 TEST(NumericDiffCostFunction, EasyCaseCostFunctionRidders) {
-  std::unique_ptr<CostFunction> cost_function;
-  cost_function.reset(
-      new NumericDiffCostFunction<EasyCostFunction,
-                                  RIDDERS,
-                                  3,  // number of residuals
-                                  5,  // size of x1
-                                  5   // size of x2
-                                  >(new EasyCostFunction, TAKE_OWNERSHIP));
+  auto cost_function =
+      std::make_unique<NumericDiffCostFunction<EasyCostFunction,
+                                               RIDDERS,
+                                               3,  // number of residuals
+                                               5,  // size of x1
+                                               5   // size of x2
+                                               >>(new EasyCostFunction,
+                                                  TAKE_OWNERSHIP);
+
   EasyFunctor functor;
   functor.ExpectCostFunctionEvaluationIsNearlyCorrect(*cost_function, RIDDERS);
 }
 
 TEST(NumericDiffCostFunction, TranscendentalCaseFunctorCentralDifferences) {
-  std::unique_ptr<CostFunction> cost_function;
-  cost_function.reset(new NumericDiffCostFunction<TranscendentalFunctor,
-                                                  CENTRAL,
-                                                  2,  // number of residuals
-                                                  5,  // size of x1
-                                                  5   // size of x2
-                                                  >(new TranscendentalFunctor));
+  auto cost_function =
+      std::make_unique<NumericDiffCostFunction<TranscendentalFunctor,
+                                               CENTRAL,
+                                               2,  // number of residuals
+                                               5,  // size of x1
+                                               5   // size of x2
+                                               >>(new TranscendentalFunctor);
   TranscendentalFunctor functor;
   functor.ExpectCostFunctionEvaluationIsNearlyCorrect(*cost_function, CENTRAL);
 }
 
 TEST(NumericDiffCostFunction, TranscendentalCaseFunctorForwardDifferences) {
-  std::unique_ptr<CostFunction> cost_function;
-  cost_function.reset(new NumericDiffCostFunction<TranscendentalFunctor,
-                                                  FORWARD,
-                                                  2,  // number of residuals
-                                                  5,  // size of x1
-                                                  5   // size of x2
-                                                  >(new TranscendentalFunctor));
+  auto cost_function =
+      std::make_unique<NumericDiffCostFunction<TranscendentalFunctor,
+                                               FORWARD,
+                                               2,  // number of residuals
+                                               5,  // size of x1
+                                               5   // size of x2
+                                               >>(new TranscendentalFunctor);
+
   TranscendentalFunctor functor;
   functor.ExpectCostFunctionEvaluationIsNearlyCorrect(*cost_function, FORWARD);
 }
@@ -153,43 +155,43 @@
   // Using a smaller initial step size to overcome oscillatory function
   // behavior.
   options.ridders_relative_initial_step_size = 1e-3;
+  auto cost_function =
+      std::make_unique<NumericDiffCostFunction<TranscendentalFunctor,
+                                               RIDDERS,
+                                               2,  // number of residuals
+                                               5,  // size of x1
+                                               5   // size of x2
+                                               >>(
+          new TranscendentalFunctor, TAKE_OWNERSHIP, 2, options);
 
-  std::unique_ptr<CostFunction> cost_function;
-  cost_function.reset(new NumericDiffCostFunction<TranscendentalFunctor,
-                                                  RIDDERS,
-                                                  2,  // number of residuals
-                                                  5,  // size of x1
-                                                  5   // size of x2
-                                                  >(
-      new TranscendentalFunctor, TAKE_OWNERSHIP, 2, options));
   TranscendentalFunctor functor;
   functor.ExpectCostFunctionEvaluationIsNearlyCorrect(*cost_function, RIDDERS);
 }
 
 TEST(NumericDiffCostFunction,
      TranscendentalCaseCostFunctionCentralDifferences) {
-  std::unique_ptr<CostFunction> cost_function;
-  cost_function.reset(new NumericDiffCostFunction<TranscendentalCostFunction,
-                                                  CENTRAL,
-                                                  2,  // number of residuals
-                                                  5,  // size of x1
-                                                  5   // size of x2
-                                                  >(
-      new TranscendentalCostFunction, TAKE_OWNERSHIP));
+  auto cost_function =
+      std::make_unique<NumericDiffCostFunction<TranscendentalCostFunction,
+                                               CENTRAL,
+                                               2,  // number of residuals
+                                               5,  // size of x1
+                                               5   // size of x2
+                                               >>(
+          new TranscendentalCostFunction, TAKE_OWNERSHIP);
   TranscendentalFunctor functor;
   functor.ExpectCostFunctionEvaluationIsNearlyCorrect(*cost_function, CENTRAL);
 }
 
 TEST(NumericDiffCostFunction,
      TranscendentalCaseCostFunctionForwardDifferences) {
-  std::unique_ptr<CostFunction> cost_function;
-  cost_function.reset(new NumericDiffCostFunction<TranscendentalCostFunction,
-                                                  FORWARD,
-                                                  2,  // number of residuals
-                                                  5,  // size of x1
-                                                  5   // size of x2
-                                                  >(
-      new TranscendentalCostFunction, TAKE_OWNERSHIP));
+  auto cost_function =
+      std::make_unique<NumericDiffCostFunction<TranscendentalCostFunction,
+                                               FORWARD,
+                                               2,  // number of residuals
+                                               5,  // size of x1
+                                               5   // size of x2
+                                               >>(
+          new TranscendentalCostFunction, TAKE_OWNERSHIP);
   TranscendentalFunctor functor;
   functor.ExpectCostFunctionEvaluationIsNearlyCorrect(*cost_function, FORWARD);
 }
@@ -201,14 +203,14 @@
   // behavior.
   options.ridders_relative_initial_step_size = 1e-3;
 
-  std::unique_ptr<CostFunction> cost_function;
-  cost_function.reset(new NumericDiffCostFunction<TranscendentalCostFunction,
-                                                  RIDDERS,
-                                                  2,  // number of residuals
-                                                  5,  // size of x1
-                                                  5   // size of x2
-                                                  >(
-      new TranscendentalCostFunction, TAKE_OWNERSHIP, 2, options));
+  auto cost_function =
+      std::make_unique<NumericDiffCostFunction<TranscendentalCostFunction,
+                                               RIDDERS,
+                                               2,  // number of residuals
+                                               5,  // size of x1
+                                               5   // size of x2
+                                               >>(
+          new TranscendentalCostFunction, TAKE_OWNERSHIP, 2, options);
   TranscendentalFunctor functor;
   functor.ExpectCostFunctionEvaluationIsNearlyCorrect(*cost_function, RIDDERS);
 }
@@ -230,121 +232,120 @@
 // templates are instantiated for various shapes of the Jacobian
 // matrix.
 TEST(NumericDiffCostFunction, EigenRowMajorColMajorTest) {
-  std::unique_ptr<CostFunction> cost_function;
-  cost_function.reset(
-      new NumericDiffCostFunction<SizeTestingCostFunction<1, 1>, CENTRAL, 1, 1>(
-          new SizeTestingCostFunction<1, 1>, ceres::TAKE_OWNERSHIP));
+  std::unique_ptr<CostFunction> cost_function = std::make_unique<
+      NumericDiffCostFunction<SizeTestingCostFunction<1, 1>, CENTRAL, 1, 1>>(
+      new SizeTestingCostFunction<1, 1>, ceres::TAKE_OWNERSHIP);
 
-  cost_function.reset(
-      new NumericDiffCostFunction<SizeTestingCostFunction<2, 1>, CENTRAL, 2, 1>(
-          new SizeTestingCostFunction<2, 1>, ceres::TAKE_OWNERSHIP));
+  cost_function = std::make_unique<
+      NumericDiffCostFunction<SizeTestingCostFunction<2, 1>, CENTRAL, 2, 1>>(
+      new SizeTestingCostFunction<2, 1>, ceres::TAKE_OWNERSHIP);
 
-  cost_function.reset(
-      new NumericDiffCostFunction<SizeTestingCostFunction<1, 2>, CENTRAL, 1, 2>(
-          new SizeTestingCostFunction<1, 2>, ceres::TAKE_OWNERSHIP));
+  cost_function = std::make_unique<
+      NumericDiffCostFunction<SizeTestingCostFunction<1, 2>, CENTRAL, 1, 2>>(
+      new SizeTestingCostFunction<1, 2>, ceres::TAKE_OWNERSHIP);
 
-  cost_function.reset(
-      new NumericDiffCostFunction<SizeTestingCostFunction<2, 2>, CENTRAL, 2, 2>(
-          new SizeTestingCostFunction<2, 2>, ceres::TAKE_OWNERSHIP));
+  cost_function = std::make_unique<
+      NumericDiffCostFunction<SizeTestingCostFunction<2, 2>, CENTRAL, 2, 2>>(
+      new SizeTestingCostFunction<2, 2>, ceres::TAKE_OWNERSHIP);
 
-  cost_function.reset(
-      new NumericDiffCostFunction<EasyFunctor, CENTRAL, ceres::DYNAMIC, 1, 1>(
-          new EasyFunctor, TAKE_OWNERSHIP, 1));
+  cost_function = std::make_unique<
+      NumericDiffCostFunction<EasyFunctor, CENTRAL, ceres::DYNAMIC, 1, 1>>(
+      new EasyFunctor, TAKE_OWNERSHIP, 1);
 
-  cost_function.reset(
-      new NumericDiffCostFunction<EasyFunctor, CENTRAL, ceres::DYNAMIC, 1, 1>(
-          new EasyFunctor, TAKE_OWNERSHIP, 2));
+  cost_function = std::make_unique<
+      NumericDiffCostFunction<EasyFunctor, CENTRAL, ceres::DYNAMIC, 1, 1>>(
+      new EasyFunctor, TAKE_OWNERSHIP, 2);
 
-  cost_function.reset(
-      new NumericDiffCostFunction<EasyFunctor, CENTRAL, ceres::DYNAMIC, 1, 2>(
-          new EasyFunctor, TAKE_OWNERSHIP, 1));
+  cost_function = std::make_unique<
+      NumericDiffCostFunction<EasyFunctor, CENTRAL, ceres::DYNAMIC, 1, 2>>(
+      new EasyFunctor, TAKE_OWNERSHIP, 1);
 
-  cost_function.reset(
-      new NumericDiffCostFunction<EasyFunctor, CENTRAL, ceres::DYNAMIC, 1, 2>(
-          new EasyFunctor, TAKE_OWNERSHIP, 2));
+  cost_function = std::make_unique<
+      NumericDiffCostFunction<EasyFunctor, CENTRAL, ceres::DYNAMIC, 1, 2>>(
+      new EasyFunctor, TAKE_OWNERSHIP, 2);
 
-  cost_function.reset(
-      new NumericDiffCostFunction<EasyFunctor, CENTRAL, ceres::DYNAMIC, 2, 1>(
-          new EasyFunctor, TAKE_OWNERSHIP, 1));
+  cost_function = std::make_unique<
+      NumericDiffCostFunction<EasyFunctor, CENTRAL, ceres::DYNAMIC, 2, 1>>(
+      new EasyFunctor, TAKE_OWNERSHIP, 1);
 
-  cost_function.reset(
-      new NumericDiffCostFunction<EasyFunctor, CENTRAL, ceres::DYNAMIC, 2, 1>(
-          new EasyFunctor, TAKE_OWNERSHIP, 2));
+  cost_function = std::make_unique<
+      NumericDiffCostFunction<EasyFunctor, CENTRAL, ceres::DYNAMIC, 2, 1>>(
+      new EasyFunctor, TAKE_OWNERSHIP, 2);
 }
 
 TEST(NumericDiffCostFunction,
      EasyCaseFunctorCentralDifferencesAndDynamicNumResiduals) {
-  std::unique_ptr<CostFunction> cost_function;
-  cost_function.reset(
-      new NumericDiffCostFunction<EasyFunctor,
-                                  CENTRAL,
-                                  ceres::DYNAMIC,
-                                  5,  // size of x1
-                                  5   // size of x2
-                                  >(new EasyFunctor, TAKE_OWNERSHIP, 3));
+  auto cost_function =
+      std::make_unique<NumericDiffCostFunction<EasyFunctor,
+                                               CENTRAL,
+                                               ceres::DYNAMIC,
+                                               5,  // size of x1
+                                               5   // size of x2
+                                               >>(
+          new EasyFunctor, TAKE_OWNERSHIP, 3);
   EasyFunctor functor;
   functor.ExpectCostFunctionEvaluationIsNearlyCorrect(*cost_function, CENTRAL);
 }
 
 TEST(NumericDiffCostFunction, ExponentialFunctorRidders) {
-  std::unique_ptr<CostFunction> cost_function;
-  cost_function.reset(new NumericDiffCostFunction<ExponentialFunctor,
-                                                  RIDDERS,
-                                                  1,  // number of residuals
-                                                  1   // size of x1
-                                                  >(new ExponentialFunctor));
+  auto cost_function =
+      std::make_unique<NumericDiffCostFunction<ExponentialFunctor,
+                                               RIDDERS,
+                                               1,  // number of residuals
+                                               1   // size of x1
+                                               >>(new ExponentialFunctor);
   ExponentialFunctor functor;
   functor.ExpectCostFunctionEvaluationIsNearlyCorrect(*cost_function);
 }
 
 TEST(NumericDiffCostFunction, ExponentialCostFunctionRidders) {
-  std::unique_ptr<CostFunction> cost_function;
-  cost_function.reset(
-      new NumericDiffCostFunction<ExponentialCostFunction,
-                                  RIDDERS,
-                                  1,  // number of residuals
-                                  1   // size of x1
-                                  >(new ExponentialCostFunction));
+  auto cost_function =
+      std::make_unique<NumericDiffCostFunction<ExponentialCostFunction,
+                                               RIDDERS,
+                                               1,  // number of residuals
+                                               1   // size of x1
+                                               >>(new ExponentialCostFunction);
   ExponentialFunctor functor;
   functor.ExpectCostFunctionEvaluationIsNearlyCorrect(*cost_function);
 }
 
 TEST(NumericDiffCostFunction, RandomizedFunctorRidders) {
-  std::unique_ptr<CostFunction> cost_function;
   NumericDiffOptions options;
   // Larger initial step size is chosen to produce robust results in the
   // presence of random noise.
   options.ridders_relative_initial_step_size = 10.0;
 
-  cost_function.reset(new NumericDiffCostFunction<RandomizedFunctor,
-                                                  RIDDERS,
-                                                  1,  // number of residuals
-                                                  1   // size of x1
-                                                  >(
-      new RandomizedFunctor(kNoiseFactor, kRandomSeed),
-      TAKE_OWNERSHIP,
-      1,
-      options));
+  auto cost_function =
+      std::make_unique<NumericDiffCostFunction<RandomizedFunctor,
+                                               RIDDERS,
+                                               1,  // number of residuals
+                                               1   // size of x1
+                                               >>(
+          new RandomizedFunctor(kNoiseFactor, kRandomSeed),
+          TAKE_OWNERSHIP,
+          1,
+          options);
   RandomizedFunctor functor(kNoiseFactor, kRandomSeed);
   functor.ExpectCostFunctionEvaluationIsNearlyCorrect(*cost_function);
 }
 
 TEST(NumericDiffCostFunction, RandomizedCostFunctionRidders) {
-  std::unique_ptr<CostFunction> cost_function;
   NumericDiffOptions options;
   // Larger initial step size is chosen to produce robust results in the
   // presence of random noise.
   options.ridders_relative_initial_step_size = 10.0;
 
-  cost_function.reset(new NumericDiffCostFunction<RandomizedCostFunction,
-                                                  RIDDERS,
-                                                  1,  // number of residuals
-                                                  1   // size of x1
-                                                  >(
-      new RandomizedCostFunction(kNoiseFactor, kRandomSeed),
-      TAKE_OWNERSHIP,
-      1,
-      options));
+  auto cost_function =
+      std::make_unique<NumericDiffCostFunction<RandomizedCostFunction,
+                                               RIDDERS,
+                                               1,  // number of residuals
+                                               1   // size of x1
+                                               >>(
+          new RandomizedCostFunction(kNoiseFactor, kRandomSeed),
+          TAKE_OWNERSHIP,
+          1,
+          options);
+
   RandomizedFunctor functor(kNoiseFactor, kRandomSeed);
   functor.ExpectCostFunctionEvaluationIsNearlyCorrect(*cost_function);
 }
@@ -363,15 +364,15 @@
   double* parameters[] = {&parameter};
   double* jacobians[] = {jacobian};
 
-  std::unique_ptr<CostFunction> cost_function(
-      new NumericDiffCostFunction<OnlyFillsOneOutputFunctor, CENTRAL, 2, 1>(
-          new OnlyFillsOneOutputFunctor));
+  auto cost_function = std::make_unique<
+      NumericDiffCostFunction<OnlyFillsOneOutputFunctor, CENTRAL, 2, 1>>(
+      new OnlyFillsOneOutputFunctor);
   InvalidateArray(2, jacobian);
   InvalidateArray(2, residuals);
   EXPECT_TRUE(cost_function->Evaluate(parameters, residuals, jacobians));
   EXPECT_FALSE(IsArrayValid(2, residuals));
   InvalidateArray(2, residuals);
-  EXPECT_TRUE(cost_function->Evaluate(parameters, residuals, NULL));
+  EXPECT_TRUE(cost_function->Evaluate(parameters, residuals, nullptr));
   // We are only testing residuals here, because the Jacobians are
   // computed using finite differencing from the residuals, so unless
   // we introduce a validation step after every evaluation of
@@ -385,12 +386,9 @@
   constexpr int kX1 = 5;
   constexpr int kX2 = 5;
 
-  std::unique_ptr<CostFunction> cost_function;
-  cost_function.reset(new NumericDiffCostFunction<EasyFunctor,
-                                                  CENTRAL,
-                                                  kNumResiduals,
-                                                  kX1,
-                                                  kX2>(new EasyFunctor));
+  auto cost_function = std::make_unique<
+      NumericDiffCostFunction<EasyFunctor, CENTRAL, kNumResiduals, kX1, kX2>>(
+      new EasyFunctor);
 
   // Prepare the parameters and residuals.
   std::array<double, kX1> x1{1e-64, 2.0, 3.0, 4.0, 5.0};
diff --git a/internal/ceres/parallel_for_cxx.cc b/internal/ceres/parallel_for_cxx.cc
index 4da40c0..a697d80 100644
--- a/internal/ceres/parallel_for_cxx.cc
+++ b/internal/ceres/parallel_for_cxx.cc
@@ -125,7 +125,7 @@
                  int num_threads,
                  const std::function<void(int)>& function) {
   CHECK_GT(num_threads, 0);
-  CHECK(context != NULL);
+  CHECK(context != nullptr);
   if (end <= start) {
     return;
   }
@@ -167,7 +167,7 @@
                  int num_threads,
                  const std::function<void(int thread_id, int i)>& function) {
   CHECK_GT(num_threads, 0);
-  CHECK(context != NULL);
+  CHECK(context != nullptr);
   if (end <= start) {
     return;
   }
diff --git a/internal/ceres/parallel_for_nothreads.cc b/internal/ceres/parallel_for_nothreads.cc
index d036569..7931445 100644
--- a/internal/ceres/parallel_for_nothreads.cc
+++ b/internal/ceres/parallel_for_nothreads.cc
@@ -47,7 +47,7 @@
                  int num_threads,
                  const std::function<void(int)>& function) {
   CHECK_GT(num_threads, 0);
-  CHECK(context != NULL);
+  CHECK(context != nullptr);
   if (end <= start) {
     return;
   }
@@ -62,7 +62,7 @@
                  int num_threads,
                  const std::function<void(int thread_id, int i)>& function) {
   CHECK_GT(num_threads, 0);
-  CHECK(context != NULL);
+  CHECK(context != nullptr);
   if (end <= start) {
     return;
   }
diff --git a/internal/ceres/parallel_for_openmp.cc b/internal/ceres/parallel_for_openmp.cc
index eb9d905..882f244 100644
--- a/internal/ceres/parallel_for_openmp.cc
+++ b/internal/ceres/parallel_for_openmp.cc
@@ -50,7 +50,7 @@
                  int num_threads,
                  const std::function<void(int)>& function) {
   CHECK_GT(num_threads, 0);
-  CHECK(context != NULL);
+  CHECK(context != nullptr);
   if (end <= start) {
     return;
   }
@@ -69,7 +69,7 @@
                  int end,
                  int num_threads,
                  const std::function<void(int thread_id, int i)>& function) {
-  CHECK(context != NULL);
+  CHECK(context != nullptr);
 
   ThreadTokenProvider thread_token_provider(num_threads);
   ParallelFor(context, start, end, num_threads, [&](int i) {
diff --git a/internal/ceres/parameter_block.h b/internal/ceres/parameter_block.h
index 824745c..304b769 100644
--- a/internal/ceres/parameter_block.h
+++ b/internal/ceres/parameter_block.h
@@ -166,7 +166,7 @@
 
     if (new_manifold == nullptr) {
       manifold_ = nullptr;
-      plus_jacobian_.reset(nullptr);
+      plus_jacobian_ = nullptr;
       return;
     }
 
@@ -180,8 +180,8 @@
         << "non-negative dimensional tangent space.";
 
     manifold_ = new_manifold;
-    plus_jacobian_.reset(
-        new double[manifold_->AmbientSize() * manifold_->TangentSize()]);
+    plus_jacobian_ = std::make_unique<double[]>(manifold_->AmbientSize() *
+                                                manifold_->TangentSize());
     CHECK(UpdatePlusJacobian())
         << "Manifold::PlusJacobian computation failed for x: "
         << ConstVectorRef(state_, Size()).transpose();
@@ -195,7 +195,7 @@
     }
 
     if (!upper_bounds_) {
-      upper_bounds_.reset(new double[size_]);
+      upper_bounds_ = std::make_unique<double[]>(size_);
       std::fill(upper_bounds_.get(),
                 upper_bounds_.get() + size_,
                 std::numeric_limits<double>::max());
@@ -212,7 +212,7 @@
     }
 
     if (!lower_bounds_) {
-      lower_bounds_.reset(new double[size_]);
+      lower_bounds_ = std::make_unique<double[]>(size_);
       std::fill(lower_bounds_.get(),
                 lower_bounds_.get() + size_,
                 -std::numeric_limits<double>::max());
@@ -269,7 +269,7 @@
     CHECK(residual_blocks_.get() == nullptr)
         << "Ceres bug: There is already a residual block collection "
         << "for parameter block: " << ToString();
-    residual_blocks_.reset(new ResidualBlockSet);
+    residual_blocks_ = std::make_unique<ResidualBlockSet>();
   }
 
   void AddResidualBlock(ResidualBlock* residual_block) {
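
One subtlety of the new array form in parameter_block.h: the expression
std::make_unique<double[]>(n) value-initializes its elements (zeros for
double), whereas the replaced new double[n] left them indeterminate. The
bounds arrays above are filled immediately after allocation, so behavior is
unchanged here; the fragment below is an illustration only, not part of the
patch.

    #include <memory>

    void ArrayInitSketch() {
      auto zeroed = std::make_unique<double[]>(4);   // value-initialized: 0.0
      std::unique_ptr<double[]> raw(new double[4]);  // left indeterminate
    }
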
diff --git a/internal/ceres/parameter_block_ordering.cc b/internal/ceres/parameter_block_ordering.cc
index 9899c24..d16e6dd 100644
--- a/internal/ceres/parameter_block_ordering.cc
+++ b/internal/ceres/parameter_block_ordering.cc
@@ -163,7 +163,7 @@
                           vector<int>* group_sizes) {
   CHECK(group_sizes != nullptr);
   group_sizes->clear();
-  if (ordering == NULL) {
+  if (ordering == nullptr) {
     return;
   }
 
diff --git a/internal/ceres/parameter_block_ordering_test.cc b/internal/ceres/parameter_block_ordering_test.cc
index 1078893..fd24839 100644
--- a/internal/ceres/parameter_block_ordering_test.cc
+++ b/internal/ceres/parameter_block_ordering_test.cc
@@ -71,12 +71,12 @@
     problem_.AddParameterBlock(z_, 5);
     problem_.AddParameterBlock(w_, 6);
 
-    problem_.AddResidualBlock(new DummyCostFunction<2, 3>, NULL, x_);
-    problem_.AddResidualBlock(new DummyCostFunction<6, 5, 4>, NULL, z_, y_);
-    problem_.AddResidualBlock(new DummyCostFunction<3, 3, 5>, NULL, x_, z_);
-    problem_.AddResidualBlock(new DummyCostFunction<7, 5, 3>, NULL, z_, x_);
+    problem_.AddResidualBlock(new DummyCostFunction<2, 3>, nullptr, x_);
+    problem_.AddResidualBlock(new DummyCostFunction<6, 5, 4>, nullptr, z_, y_);
+    problem_.AddResidualBlock(new DummyCostFunction<3, 3, 5>, nullptr, x_, z_);
+    problem_.AddResidualBlock(new DummyCostFunction<7, 5, 3>, nullptr, z_, x_);
     problem_.AddResidualBlock(
-        new DummyCostFunction<1, 5, 3, 6>, NULL, z_, x_, w_);
+        new DummyCostFunction<1, 5, 3, 6>, nullptr, z_, x_, w_);
   }
 
   ProblemImpl problem_;
diff --git a/internal/ceres/partitioned_matrix_view.cc b/internal/ceres/partitioned_matrix_view.cc
index b67bc90..595b701 100644
--- a/internal/ceres/partitioned_matrix_view.cc
+++ b/internal/ceres/partitioned_matrix_view.cc
@@ -45,137 +45,137 @@
 namespace ceres {
 namespace internal {
 
-PartitionedMatrixViewBase* PartitionedMatrixViewBase::Create(
+std::unique_ptr<PartitionedMatrixViewBase> PartitionedMatrixViewBase::Create(
     const LinearSolver::Options& options, const BlockSparseMatrix& matrix) {
 #ifndef CERES_RESTRICT_SCHUR_SPECIALIZATION
   if ((options.row_block_size == 2) &&
      (options.e_block_size == 2) &&
      (options.f_block_size == 2)) {
-    return new PartitionedMatrixView<2, 2, 2>(matrix,
-                                              options.elimination_groups[0]);
+    return std::make_unique<PartitionedMatrixView<2,2, 2>>(
+                   matrix, options.elimination_groups[0]);
   }
   if ((options.row_block_size == 2) &&
      (options.e_block_size == 2) &&
      (options.f_block_size == 3)) {
-    return new PartitionedMatrixView<2, 2, 3>(matrix,
-                                              options.elimination_groups[0]);
+    return std::make_unique<PartitionedMatrixView<2,2, 3>>(
+                   matrix, options.elimination_groups[0]);
   }
   if ((options.row_block_size == 2) &&
      (options.e_block_size == 2) &&
      (options.f_block_size == 4)) {
-    return new PartitionedMatrixView<2, 2, 4>(matrix,
-                                              options.elimination_groups[0]);
+    return std::make_unique<PartitionedMatrixView<2,2, 4>>(
+                   matrix, options.elimination_groups[0]);
   }
   if ((options.row_block_size == 2) &&
      (options.e_block_size == 2)) {
-    return new PartitionedMatrixView<2, 2, Eigen::Dynamic>(matrix,
-                                              options.elimination_groups[0]);
+    return std::make_unique<PartitionedMatrixView<2,2, Eigen::Dynamic>>(
+                   matrix, options.elimination_groups[0]);
   }
   if ((options.row_block_size == 2) &&
      (options.e_block_size == 3) &&
      (options.f_block_size == 3)) {
-    return new PartitionedMatrixView<2, 3, 3>(matrix,
-                                              options.elimination_groups[0]);
+    return std::make_unique<PartitionedMatrixView<2,3, 3>>(
+                   matrix, options.elimination_groups[0]);
   }
   if ((options.row_block_size == 2) &&
      (options.e_block_size == 3) &&
      (options.f_block_size == 4)) {
-    return new PartitionedMatrixView<2, 3, 4>(matrix,
-                                              options.elimination_groups[0]);
+    return std::make_unique<PartitionedMatrixView<2,3, 4>>(
+                   matrix, options.elimination_groups[0]);
   }
   if ((options.row_block_size == 2) &&
      (options.e_block_size == 3) &&
      (options.f_block_size == 6)) {
-    return new PartitionedMatrixView<2, 3, 6>(matrix,
-                                              options.elimination_groups[0]);
+    return std::make_unique<PartitionedMatrixView<2,3, 6>>(
+                   matrix, options.elimination_groups[0]);
   }
   if ((options.row_block_size == 2) &&
      (options.e_block_size == 3) &&
      (options.f_block_size == 9)) {
-    return new PartitionedMatrixView<2, 3, 9>(matrix,
-                                              options.elimination_groups[0]);
+    return std::make_unique<PartitionedMatrixView<2,3, 9>>(
+                   matrix, options.elimination_groups[0]);
   }
   if ((options.row_block_size == 2) &&
      (options.e_block_size == 3)) {
-    return new PartitionedMatrixView<2, 3, Eigen::Dynamic>(matrix,
-                                              options.elimination_groups[0]);
+    return std::make_unique<PartitionedMatrixView<2,3, Eigen::Dynamic>>(
+                   matrix, options.elimination_groups[0]);
   }
   if ((options.row_block_size == 2) &&
      (options.e_block_size == 4) &&
      (options.f_block_size == 3)) {
-    return new PartitionedMatrixView<2, 4, 3>(matrix,
-                                              options.elimination_groups[0]);
+    return std::make_unique<PartitionedMatrixView<2,4, 3>>(
+                   matrix, options.elimination_groups[0]);
   }
   if ((options.row_block_size == 2) &&
      (options.e_block_size == 4) &&
      (options.f_block_size == 4)) {
-    return new PartitionedMatrixView<2, 4, 4>(matrix,
-                                              options.elimination_groups[0]);
+    return std::make_unique<PartitionedMatrixView<2,4, 4>>(
+                   matrix, options.elimination_groups[0]);
   }
   if ((options.row_block_size == 2) &&
      (options.e_block_size == 4) &&
      (options.f_block_size == 6)) {
-    return new PartitionedMatrixView<2, 4, 6>(matrix,
-                                              options.elimination_groups[0]);
+    return std::make_unique<PartitionedMatrixView<2,4, 6>>(
+                   matrix, options.elimination_groups[0]);
   }
   if ((options.row_block_size == 2) &&
      (options.e_block_size == 4) &&
      (options.f_block_size == 8)) {
-    return new PartitionedMatrixView<2, 4, 8>(matrix,
-                                              options.elimination_groups[0]);
+    return std::make_unique<PartitionedMatrixView<2,4, 8>>(
+                   matrix, options.elimination_groups[0]);
   }
   if ((options.row_block_size == 2) &&
      (options.e_block_size == 4) &&
      (options.f_block_size == 9)) {
-    return new PartitionedMatrixView<2, 4, 9>(matrix,
-                                              options.elimination_groups[0]);
+    return std::make_unique<PartitionedMatrixView<2,4, 9>>(
+                   matrix, options.elimination_groups[0]);
   }
   if ((options.row_block_size == 2) &&
      (options.e_block_size == 4)) {
-    return new PartitionedMatrixView<2, 4, Eigen::Dynamic>(matrix,
-                                              options.elimination_groups[0]);
+    return std::make_unique<PartitionedMatrixView<2,4, Eigen::Dynamic>>(
+                   matrix, options.elimination_groups[0]);
   }
   if (options.row_block_size == 2) {
-    return new PartitionedMatrixView<2, Eigen::Dynamic, Eigen::Dynamic>(matrix,
-                                              options.elimination_groups[0]);
+    return std::make_unique<PartitionedMatrixView<2,Eigen::Dynamic, Eigen::Dynamic>>(
+                   matrix, options.elimination_groups[0]);
   }
   if ((options.row_block_size == 3) &&
      (options.e_block_size == 3) &&
      (options.f_block_size == 3)) {
-    return new PartitionedMatrixView<3, 3, 3>(matrix,
-                                              options.elimination_groups[0]);
+    return std::make_unique<PartitionedMatrixView<3,3, 3>>(
+                   matrix, options.elimination_groups[0]);
   }
   if ((options.row_block_size == 4) &&
      (options.e_block_size == 4) &&
      (options.f_block_size == 2)) {
-    return new PartitionedMatrixView<4, 4, 2>(matrix,
-                                              options.elimination_groups[0]);
+    return std::make_unique<PartitionedMatrixView<4,4, 2>>(
+                   matrix, options.elimination_groups[0]);
   }
   if ((options.row_block_size == 4) &&
      (options.e_block_size == 4) &&
      (options.f_block_size == 3)) {
-    return new PartitionedMatrixView<4, 4, 3>(matrix,
-                                              options.elimination_groups[0]);
+    return std::make_unique<PartitionedMatrixView<4,4, 3>>(
+                   matrix, options.elimination_groups[0]);
   }
   if ((options.row_block_size == 4) &&
      (options.e_block_size == 4) &&
      (options.f_block_size == 4)) {
-    return new PartitionedMatrixView<4, 4, 4>(matrix,
-                                              options.elimination_groups[0]);
+    return std::make_unique<PartitionedMatrixView<4,4, 4>>(
+                   matrix, options.elimination_groups[0]);
   }
   if ((options.row_block_size == 4) &&
      (options.e_block_size == 4)) {
-    return new PartitionedMatrixView<4, 4, Eigen::Dynamic>(matrix,
-                                              options.elimination_groups[0]);
+    return std::make_unique<PartitionedMatrixView<4,4, Eigen::Dynamic>>(
+                   matrix, options.elimination_groups[0]);
   }
 
 #endif
   VLOG(1) << "Template specializations not found for <"
           << options.row_block_size << "," << options.e_block_size << ","
           << options.f_block_size << ">";
-  return new PartitionedMatrixView<Eigen::Dynamic,
-                                   Eigen::Dynamic,
-                                   Eigen::Dynamic>(
+  return std::make_unique<PartitionedMatrixView<Eigen::Dynamic,
+                                                Eigen::Dynamic,
+                                                Eigen::Dynamic>>(
       matrix, options.elimination_groups[0]);
 };
 
diff --git a/internal/ceres/partitioned_matrix_view.h b/internal/ceres/partitioned_matrix_view.h
index 4761050..36cf0d4 100644
--- a/internal/ceres/partitioned_matrix_view.h
+++ b/internal/ceres/partitioned_matrix_view.h
@@ -77,11 +77,11 @@
   virtual void RightMultiplyF(const double* x, double* y) const = 0;
 
   // Create and return the block diagonal of the matrix E'E.
-  virtual BlockSparseMatrix* CreateBlockDiagonalEtE() const = 0;
+  virtual std::unique_ptr<BlockSparseMatrix> CreateBlockDiagonalEtE() const = 0;
 
   // Create and return the block diagonal of the matrix F'F. Caller
   // owns the result.
-  virtual BlockSparseMatrix* CreateBlockDiagonalFtF() const = 0;
+  virtual std::unique_ptr<BlockSparseMatrix> CreateBlockDiagonalFtF() const = 0;
 
   // Compute the block diagonal of the matrix E'E and store it in
   // block_diagonal. The matrix block_diagonal is expected to have a
@@ -108,8 +108,8 @@
   virtual int num_cols()         const = 0;
   // clang-format on
 
-  static PartitionedMatrixViewBase* Create(const LinearSolver::Options& options,
-                                           const BlockSparseMatrix& matrix);
+  static std::unique_ptr<PartitionedMatrixViewBase> Create(
+      const LinearSolver::Options& options, const BlockSparseMatrix& matrix);
 };
 
 template <int kRowBlockSize = Eigen::Dynamic,
@@ -126,8 +126,8 @@
   void LeftMultiplyF(const double* x, double* y) const final;
   void RightMultiplyE(const double* x, double* y) const final;
   void RightMultiplyF(const double* x, double* y) const final;
-  BlockSparseMatrix* CreateBlockDiagonalEtE() const final;
-  BlockSparseMatrix* CreateBlockDiagonalFtF() const final;
+  std::unique_ptr<BlockSparseMatrix> CreateBlockDiagonalEtE() const final;
+  std::unique_ptr<BlockSparseMatrix> CreateBlockDiagonalFtF() const final;
   void UpdateBlockDiagonalEtE(BlockSparseMatrix* block_diagonal) const final;
   void UpdateBlockDiagonalFtF(BlockSparseMatrix* block_diagonal) const final;
   // clang-format off
@@ -140,8 +140,8 @@
   // clang-format on
 
  private:
-  BlockSparseMatrix* CreateBlockDiagonalMatrixLayout(int start_col_block,
-                                                     int end_col_block) const;
+  std::unique_ptr<BlockSparseMatrix> CreateBlockDiagonalMatrixLayout(
+      int start_col_block, int end_col_block) const;
 
   const BlockSparseMatrix& matrix_;
   int num_row_blocks_e_;
diff --git a/internal/ceres/partitioned_matrix_view_impl.h b/internal/ceres/partitioned_matrix_view_impl.h
index 22594ef..4dd8d70 100644
--- a/internal/ceres/partitioned_matrix_view_impl.h
+++ b/internal/ceres/partitioned_matrix_view_impl.h
@@ -235,7 +235,7 @@
 // and return a BlockSparseMatrix with this block structure. The
 // caller owns the result.
 template <int kRowBlockSize, int kEBlockSize, int kFBlockSize>
-BlockSparseMatrix*
+std::unique_ptr<BlockSparseMatrix>
 PartitionedMatrixView<kRowBlockSize, kEBlockSize, kFBlockSize>::
     CreateBlockDiagonalMatrixLayout(int start_col_block,
                                     int end_col_block) const {
@@ -270,28 +270,27 @@
 
   // Build a BlockSparseMatrix with the just computed block
   // structure.
-  return new BlockSparseMatrix(block_diagonal_structure);
+  return std::make_unique<BlockSparseMatrix>(block_diagonal_structure);
 }
 
 template <int kRowBlockSize, int kEBlockSize, int kFBlockSize>
-BlockSparseMatrix* PartitionedMatrixView<kRowBlockSize,
-                                         kEBlockSize,
-                                         kFBlockSize>::CreateBlockDiagonalEtE()
-    const {
-  BlockSparseMatrix* block_diagonal =
+std::unique_ptr<BlockSparseMatrix>
+PartitionedMatrixView<kRowBlockSize, kEBlockSize, kFBlockSize>::
+    CreateBlockDiagonalEtE() const {
+  std::unique_ptr<BlockSparseMatrix> block_diagonal =
       CreateBlockDiagonalMatrixLayout(0, num_col_blocks_e_);
-  UpdateBlockDiagonalEtE(block_diagonal);
+  UpdateBlockDiagonalEtE(block_diagonal.get());
   return block_diagonal;
 }
 
 template <int kRowBlockSize, int kEBlockSize, int kFBlockSize>
-BlockSparseMatrix* PartitionedMatrixView<kRowBlockSize,
-                                         kEBlockSize,
-                                         kFBlockSize>::CreateBlockDiagonalFtF()
-    const {
-  BlockSparseMatrix* block_diagonal = CreateBlockDiagonalMatrixLayout(
-      num_col_blocks_e_, num_col_blocks_e_ + num_col_blocks_f_);
-  UpdateBlockDiagonalFtF(block_diagonal);
+std::unique_ptr<BlockSparseMatrix>
+PartitionedMatrixView<kRowBlockSize, kEBlockSize, kFBlockSize>::
+    CreateBlockDiagonalFtF() const {
+  std::unique_ptr<BlockSparseMatrix> block_diagonal =
+      CreateBlockDiagonalMatrixLayout(num_col_blocks_e_,
+                                      num_col_blocks_e_ + num_col_blocks_f_);
+  UpdateBlockDiagonalFtF(block_diagonal.get());
   return block_diagonal;
 }
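
The two return block_diagonal; statements above now return a move-only type
by value. This compiles because returning a local std::unique_ptr is treated
as an rvalue (implicit move, and NRVO usually elides even that), so no
explicit std::move is needed. A minimal sketch of the same pattern, with a
hypothetical Widget factory for illustration:

    #include <memory>

    struct Widget {};

    std::unique_ptr<Widget> MakeWidget() {
      auto w = std::make_unique<Widget>();
      return w;  // implicit move of the local; unique_ptr is non-copyable
    }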
 
diff --git a/internal/ceres/partitioned_matrix_view_template.py b/internal/ceres/partitioned_matrix_view_template.py
index 05a25bf..9ab56cf 100644
--- a/internal/ceres/partitioned_matrix_view_template.py
+++ b/internal/ceres/partitioned_matrix_view_template.py
@@ -128,21 +128,21 @@
 namespace ceres {
 namespace internal {
 
-PartitionedMatrixViewBase* PartitionedMatrixViewBase::Create(
+std::unique_ptr<PartitionedMatrixViewBase> PartitionedMatrixViewBase::Create(
     const LinearSolver::Options& options, const BlockSparseMatrix& matrix) {
 #ifndef CERES_RESTRICT_SCHUR_SPECIALIZATION
 """
-FACTORY = """  return new PartitionedMatrixView<%s, %s, %s>(matrix,
-                                              options.elimination_groups[0]);"""
+FACTORY = """  return std::make_unique<PartitionedMatrixView<%s,%s, %s>>(
+                   matrix, options.elimination_groups[0]);"""
 
 FACTORY_FOOTER = """
 #endif
   VLOG(1) << "Template specializations not found for <"
           << options.row_block_size << "," << options.e_block_size << ","
           << options.f_block_size << ">";
-  return new PartitionedMatrixView<Eigen::Dynamic,
-                                   Eigen::Dynamic,
-                                   Eigen::Dynamic>(
+  return std::make_unique<PartitionedMatrixView<Eigen::Dynamic,
+                                                Eigen::Dynamic,
+                                                Eigen::Dynamic>>(
       matrix, options.elimination_groups[0]);
 };
 
diff --git a/internal/ceres/partitioned_matrix_view_test.cc b/internal/ceres/partitioned_matrix_view_test.cc
index b66d0b8..d01dd3b 100644
--- a/internal/ceres/partitioned_matrix_view_test.cc
+++ b/internal/ceres/partitioned_matrix_view_test.cc
@@ -51,18 +51,18 @@
  protected:
   void SetUp() final {
     srand(5);
-    std::unique_ptr<LinearLeastSquaresProblem> problem(
-        CreateLinearLeastSquaresProblemFromId(2));
+    std::unique_ptr<LinearLeastSquaresProblem> problem =
+        CreateLinearLeastSquaresProblemFromId(2);
     CHECK(problem != nullptr);
-    A_.reset(problem->A.release());
+    A_ = std::move(problem->A);
 
     num_cols_ = A_->num_cols();
     num_rows_ = A_->num_rows();
     num_eliminate_blocks_ = problem->num_eliminate_blocks;
     LinearSolver::Options options;
     options.elimination_groups.push_back(num_eliminate_blocks_);
-    pmv_.reset(PartitionedMatrixViewBase::Create(
-        options, *down_cast<BlockSparseMatrix*>(A_.get())));
+    pmv_ = PartitionedMatrixViewBase::Create(
+        options, *down_cast<BlockSparseMatrix*>(A_.get()));
   }
 
   int num_rows_;
diff --git a/internal/ceres/polynomial.cc b/internal/ceres/polynomial.cc
index 20812f4..feb1222 100644
--- a/internal/ceres/polynomial.cc
+++ b/internal/ceres/polynomial.cc
@@ -128,12 +128,12 @@
                                Vector* real,
                                Vector* imaginary) {
   CHECK_EQ(polynomial.size(), 2);
-  if (real != NULL) {
+  if (real != nullptr) {
     real->resize(1);
     (*real)(0) = -polynomial(1) / polynomial(0);
   }
 
-  if (imaginary != NULL) {
+  if (imaginary != nullptr) {
     imaginary->setZero(1);
   }
 }
@@ -147,16 +147,16 @@
   const double c = polynomial(2);
   const double D = b * b - 4 * a * c;
   const double sqrt_D = sqrt(fabs(D));
-  if (real != NULL) {
+  if (real != nullptr) {
     real->setZero(2);
   }
-  if (imaginary != NULL) {
+  if (imaginary != nullptr) {
     imaginary->setZero(2);
   }
 
   // Real roots.
   if (D >= 0) {
-    if (real != NULL) {
+    if (real != nullptr) {
       // Stable quadratic roots according to BKP Horn.
       // http://people.csail.mit.edu/bkph/articles/Quadratics.pdf
       if (b >= 0) {
@@ -171,11 +171,11 @@
   }
 
   // Use the normal quadratic formula for the complex case.
-  if (real != NULL) {
+  if (real != nullptr) {
     (*real)(0) = -b / (2.0 * a);
     (*real)(1) = -b / (2.0 * a);
   }
-  if (imaginary != NULL) {
+  if (imaginary != nullptr) {
     (*imaginary)(0) = sqrt_D / (2.0 * a);
     (*imaginary)(1) = -sqrt_D / (2.0 * a);
   }
@@ -240,14 +240,14 @@
   }
 
   // Output roots
-  if (real != NULL) {
+  if (real != nullptr) {
     *real = solver.eigenvalues().real();
   } else {
-    LOG(WARNING) << "NULL pointer passed as real argument to "
+    LOG(WARNING) << "nullptr passed as real argument to "
                  << "FindPolynomialRoots. Real parts of the roots will not "
                  << "be returned.";
   }
-  if (imaginary != NULL) {
+  if (imaginary != nullptr) {
     *imaginary = solver.eigenvalues().imag();
   }
   return true;
@@ -304,7 +304,7 @@
 
   const Vector derivative = DifferentiatePolynomial(polynomial);
   Vector roots_real;
-  if (!FindPolynomialRoots(derivative, &roots_real, NULL)) {
+  if (!FindPolynomialRoots(derivative, &roots_real, nullptr)) {
     LOG(WARNING) << "Unable to find the critical points of "
                  << "the interpolating polynomial.";
     return;
diff --git a/internal/ceres/polynomial.h b/internal/ceres/polynomial.h
index 20071f2..3d43284 100644
--- a/internal/ceres/polynomial.h
+++ b/internal/ceres/polynomial.h
@@ -64,8 +64,8 @@
 // Failure indicates that the polynomial is invalid (of size 0) or
 // that the eigenvalues of the companion matrix could not be computed.
 // On failure, a more detailed message will be written to LOG(ERROR).
-// If real is not NULL, the real parts of the roots will be returned in it.
-// Likewise, if imaginary is not NULL, imaginary parts will be returned in it.
+// If real is not nullptr, the real parts of the roots will be returned in it.
+// Likewise, if imaginary is not nullptr, imaginary parts are returned in it.
 CERES_EXPORT_INTERNAL bool FindPolynomialRoots(const Vector& polynomial,
                                                Vector* real,
                                                Vector* imaginary);
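
The comment above documents that either output argument of
FindPolynomialRoots may be nullptr. A usage sketch of the post-change API
(assuming the declarations from polynomial.h are in scope and that Vector is
the internal Eigen column-vector typedef; root ordering is not guaranteed):

    void FindRootsSketch() {
      Vector poly(3);
      poly << 1.0, -3.0, 2.0;  // x^2 - 3x + 2, i.e. (x - 1)(x - 2)
      Vector real;
      if (FindPolynomialRoots(poly, &real, nullptr)) {
        // real now holds 1 and 2 in some order; imaginary parts were not
        // requested, so no storage for them is touched.
      }
    }
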
diff --git a/internal/ceres/polynomial_test.cc b/internal/ceres/polynomial_test.cc
index 0ff73ea..c45557e 100644
--- a/internal/ceres/polynomial_test.cc
+++ b/internal/ceres/polynomial_test.cc
@@ -88,8 +88,8 @@
 }
 
 // Run a test with the polynomial defined by the N real roots in roots_real.
-// If use_real is false, NULL is passed as the real argument to
-// FindPolynomialRoots. If use_imaginary is false, NULL is passed as the
+// If use_real is false, nullptr is passed as the real argument to
+// FindPolynomialRoots. If use_imaginary is false, nullptr is passed as the
 // imaginary argument to FindPolynomialRoots.
 template <int N>
 void RunPolynomialTestRealRoots(const double (&real_roots)[N],
@@ -102,8 +102,8 @@
   for (int i = 0; i < N; ++i) {
     poly = AddRealRoot(poly, real_roots[i]);
   }
-  Vector* const real_ptr = use_real ? &real : NULL;
-  Vector* const imaginary_ptr = use_imaginary ? &imaginary : NULL;
+  Vector* const real_ptr = use_real ? &real : nullptr;
+  Vector* const imaginary_ptr = use_imaginary ? &imaginary : nullptr;
   bool success = FindPolynomialRoots(poly, real_ptr, imaginary_ptr);
 
   EXPECT_EQ(success, true);
diff --git a/internal/ceres/preconditioner.h b/internal/ceres/preconditioner.h
index 6a6bc61..e0eb88d 100644
--- a/internal/ceres/preconditioner.h
+++ b/internal/ceres/preconditioner.h
@@ -126,7 +126,7 @@
   // for some vector b. It is important that the matrix A have the
   // same block structure as the one used to construct this object.
   //
-  // D can be NULL, in which case its interpreted as a diagonal matrix
+  // D can be nullptr, in which case it is interpreted as a diagonal matrix
   // of size zero.
   virtual bool Update(const LinearOperator& A, const double* D) = 0;
 
diff --git a/internal/ceres/preprocessor.cc b/internal/ceres/preprocessor.cc
index 097ac64..1fcd74a 100644
--- a/internal/ceres/preprocessor.cc
+++ b/internal/ceres/preprocessor.cc
@@ -51,7 +51,7 @@
   }
 
   LOG(FATAL) << "Unknown minimizer_type: " << minimizer_type;
-  return NULL;
+  return nullptr;
 }
 
 Preprocessor::~Preprocessor() = default;
@@ -85,15 +85,15 @@
   minimizer_options.evaluator = pp->evaluator;
 
   if (options.logging_type != SILENT) {
-    pp->logging_callback.reset(new LoggingCallback(
-        options.minimizer_type, options.minimizer_progress_to_stdout));
+    pp->logging_callback = std::make_unique<LoggingCallback>(
+        options.minimizer_type, options.minimizer_progress_to_stdout);
     minimizer_options.callbacks.insert(minimizer_options.callbacks.begin(),
                                        pp->logging_callback.get());
   }
 
   if (options.update_state_every_iteration) {
-    pp->state_updating_callback.reset(
-        new StateUpdatingCallback(program, reduced_parameters));
+    pp->state_updating_callback =
+        std::make_unique<StateUpdatingCallback>(program, reduced_parameters);
     // This must get pushed to the front of the callbacks so that it
     // is run before any of the user callbacks.
     minimizer_options.callbacks.insert(minimizer_options.callbacks.begin(),
diff --git a/internal/ceres/problem_impl.cc b/internal/ceres/problem_impl.cc
index a49cd5e..f256aab 100644
--- a/internal/ceres/problem_impl.cc
+++ b/internal/ceres/problem_impl.cc
@@ -763,8 +763,8 @@
 
   std::unique_ptr<CompressedRowSparseMatrix> tmp_jacobian;
   if (jacobian != nullptr) {
-    tmp_jacobian.reset(
-        down_cast<CompressedRowSparseMatrix*>(evaluator->CreateJacobian()));
+    tmp_jacobian.reset(down_cast<CompressedRowSparseMatrix*>(
+        evaluator->CreateJacobian().release()));
   }
 
   // Point the state pointers to the user state pointers. This is
diff --git a/internal/ceres/problem_test.cc b/internal/ceres/problem_test.cc
index 483ae07..1987e13 100644
--- a/internal/ceres/problem_test.cc
+++ b/internal/ceres/problem_test.cc
@@ -429,7 +429,7 @@
   DynamicProblem() {
     Problem::Options options;
     options.enable_fast_removal = GetParam();
-    problem.reset(new ProblemImpl(options));
+    problem = std::make_unique<ProblemImpl>(options);
   }
 
   ParameterBlock* GetParameterBlock(int block) {
diff --git a/internal/ceres/program.cc b/internal/ceres/program.cc
index 516d58d..adfff0b 100644
--- a/internal/ceres/program.cc
+++ b/internal/ceres/program.cc
@@ -279,7 +279,7 @@
   return true;
 }
 
-Program* Program::CreateReducedProgram(
+std::unique_ptr<Program> Program::CreateReducedProgram(
     std::vector<double*>* removed_parameter_blocks,
     double* fixed_cost,
     std::string* error) const {
@@ -287,14 +287,14 @@
   CHECK(fixed_cost != nullptr);
   CHECK(error != nullptr);
 
-  std::unique_ptr<Program> reduced_program(new Program(*this));
+  std::unique_ptr<Program> reduced_program = std::make_unique<Program>(*this);
   if (!reduced_program->RemoveFixedBlocks(
           removed_parameter_blocks, fixed_cost, error)) {
     return nullptr;
   }
 
   reduced_program->SetParameterOffsetsAndIndex();
-  return reduced_program.release();
+  return reduced_program;
 }
 
 bool Program::RemoveFixedBlocks(std::vector<double*>* removed_parameter_blocks,
@@ -305,8 +305,8 @@
   CHECK(error != nullptr);
 
   std::unique_ptr<double[]> residual_block_evaluate_scratch;
-  residual_block_evaluate_scratch.reset(
-      new double[MaxScratchDoublesNeededForEvaluate()]);
+  residual_block_evaluate_scratch =
+      std::make_unique<double[]>(MaxScratchDoublesNeededForEvaluate());
   *fixed_cost = 0.0;
 
   bool need_to_call_prepare_for_evaluation = evaluation_callback_ != nullptr;
diff --git a/internal/ceres/program.h b/internal/ceres/program.h
index 343cf8e..ffd5db7 100644
--- a/internal/ceres/program.h
+++ b/internal/ceres/program.h
@@ -146,12 +146,13 @@
   // fixed_cost will be equal to the sum of the costs of the residual
   // blocks that were removed.
   //
-  // If there was a problem, then the function will return a NULL
+  // If there was a problem, then the function will return a null
   // pointer and error will contain a human readable description of
   // the problem.
-  Program* CreateReducedProgram(std::vector<double*>* removed_parameter_blocks,
-                                double* fixed_cost,
-                                std::string* error) const;
+  std::unique_ptr<Program> CreateReducedProgram(
+      std::vector<double*>* removed_parameter_blocks,
+      double* fixed_cost,
+      std::string* error) const;
 
   // See problem.h for what these do.
   int NumParameterBlocks() const;
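
Call sites of CreateReducedProgram now receive ownership directly and can
test the documented failure mode against nullptr. A hypothetical caller
sketch (the variable names and the surrounding Program instance are invented
for illustration):

    std::vector<double*> removed_parameter_blocks;
    double fixed_cost = 0.0;
    std::string error;
    std::unique_ptr<Program> reduced = program.CreateReducedProgram(
        &removed_parameter_blocks, &fixed_cost, &error);
    if (reduced == nullptr) {
      LOG(ERROR) << "Failed to create reduced program: " << error;
    }
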
diff --git a/internal/ceres/program_evaluator.h b/internal/ceres/program_evaluator.h
index 4ab557f..2ab52da 100644
--- a/internal/ceres/program_evaluator.h
+++ b/internal/ceres/program_evaluator.h
@@ -59,7 +59,7 @@
 //   class JacobianWriter {
 //     // Create a jacobian that this writer can write. Same as
 //     // Evaluator::CreateJacobian.
-//     SparseMatrix* CreateJacobian() const;
+//     std::unique_ptr<SparseMatrix> CreateJacobian() const;
 //
 //     // Create num_threads evaluate preparers. Caller owns result which must
 //     // be freed with delete[]. Resulting preparers are valid while *this is.
@@ -127,12 +127,12 @@
 #endif  // CERES_NO_THREADS
 
     BuildResidualLayout(*program, &residual_layout_);
-    evaluate_scratch_.reset(
-        CreateEvaluatorScratch(*program, options.num_threads));
+    evaluate_scratch_ =
+        CreateEvaluatorScratch(*program, options.num_threads);
   }
 
   // Implementation of Evaluator interface.
-  SparseMatrix* CreateJacobian() const final {
+  std::unique_ptr<SparseMatrix> CreateJacobian() const final {
     return jacobian_writer_.CreateJacobian();
   }
 
@@ -309,13 +309,14 @@
               int max_scratch_doubles_needed_for_evaluate,
               int max_residuals_per_residual_block,
               int num_parameters) {
-      residual_block_evaluate_scratch.reset(
-          new double[max_scratch_doubles_needed_for_evaluate]);
-      gradient.reset(new double[num_parameters]);
+      residual_block_evaluate_scratch =
+          std::make_unique<double[]>(max_scratch_doubles_needed_for_evaluate);
+      gradient = std::make_unique<double[]>(num_parameters);
       VectorRef(gradient.get(), num_parameters).setZero();
-      residual_block_residuals.reset(
-          new double[max_residuals_per_residual_block]);
-      jacobian_block_ptrs.reset(new double*[max_parameters_per_residual_block]);
+      residual_block_residuals =
+          std::make_unique<double[]>(max_residuals_per_residual_block);
+      jacobian_block_ptrs =
+          std::make_unique<double*[]>(max_parameters_per_residual_block);
     }
 
     double cost;
@@ -341,8 +342,8 @@
   }
 
   // Create scratch space for each thread evaluating the program.
-  static EvaluateScratch* CreateEvaluatorScratch(const Program& program,
-                                                 int num_threads) {
+  static std::unique_ptr<EvaluateScratch[]> CreateEvaluatorScratch(
+      const Program& program, int num_threads) {
     int max_parameters_per_residual_block =
         program.MaxParametersPerResidualBlock();
     int max_scratch_doubles_needed_for_evaluate =
@@ -351,7 +352,7 @@
         program.MaxResidualsPerResidualBlock();
     int num_parameters = program.NumEffectiveParameters();
 
-    EvaluateScratch* evaluate_scratch = new EvaluateScratch[num_threads];
+    auto evaluate_scratch = std::make_unique<EvaluateScratch[]>(num_threads);
     for (int i = 0; i < num_threads; i++) {
       evaluate_scratch[i].Init(max_parameters_per_residual_block,
                                max_scratch_doubles_needed_for_evaluate,
diff --git a/internal/ceres/reorder_program.cc b/internal/ceres/reorder_program.cc
index eefb085..2d93487 100644
--- a/internal/ceres/reorder_program.cc
+++ b/internal/ceres/reorder_program.cc
@@ -291,7 +291,7 @@
   // filling is finished, the offset pointers should have shifted down one
   // entry (this is verified below).
   vector<ResidualBlock*> reordered_residual_blocks(
-      (*residual_blocks).size(), static_cast<ResidualBlock*>(NULL));
+      (*residual_blocks).size(), static_cast<ResidualBlock*>(nullptr));
   for (int i = 0; i < residual_blocks->size(); ++i) {
     int bucket = min_position_per_residual[i];
 
@@ -299,7 +299,7 @@
     offsets[bucket]--;
 
     // Sanity.
-    CHECK(reordered_residual_blocks[offsets[bucket]] == NULL)
+    CHECK(reordered_residual_blocks[offsets[bucket]] == nullptr)
         << "Congratulations, you found a Ceres bug! Please report this error "
         << "to the developers.";
 
@@ -313,9 +313,9 @@
         << "Congratulations, you found a Ceres bug! Please report this error "
         << "to the developers.";
   }
-  // Sanity check #2: No NULL's left behind.
+  // Sanity check #2: No nullptrs left behind.
   for (int i = 0; i < reordered_residual_blocks.size(); ++i) {
-    CHECK(reordered_residual_blocks[i] != NULL)
+    CHECK(reordered_residual_blocks[i] != nullptr)
         << "Congratulations, you found a Ceres bug! Please report this error "
         << "to the developers.";
   }
@@ -441,7 +441,7 @@
 
   if (parameter_block_ordering->NumGroups() == 1) {
     // If the user supplied a parameter_block_ordering with just one
-    // group, it is equivalent to the user supplying NULL as an
+    // group, it is equivalent to the user supplying nullptr as a
     // parameter_block_ordering. Ceres is completely free to choose the
     // parameter block ordering as it sees fit. For Schur type solvers,
     // this means that the user wishes for Ceres to identify the
diff --git a/internal/ceres/reorder_program_test.cc b/internal/ceres/reorder_program_test.cc
index 37a07cb..427334e 100644
--- a/internal/ceres/reorder_program_test.cc
+++ b/internal/ceres/reorder_program_test.cc
@@ -78,14 +78,14 @@
   problem.AddResidualBlock(new BinaryCostFunction(), nullptr, &x, &y);
   problem.AddResidualBlock(new UnaryCostFunction(), nullptr, &y);
 
-  ParameterBlockOrdering* linear_solver_ordering = new ParameterBlockOrdering;
+  auto linear_solver_ordering = std::make_shared<ParameterBlockOrdering>();
   linear_solver_ordering->AddElementToGroup(&x, 0);
   linear_solver_ordering->AddElementToGroup(&y, 0);
   linear_solver_ordering->AddElementToGroup(&z, 1);
 
   Solver::Options options;
   options.linear_solver_type = DENSE_SCHUR;
-  options.linear_solver_ordering.reset(linear_solver_ordering);
+  options.linear_solver_ordering = linear_solver_ordering;
 
   const vector<ResidualBlock*>& residual_blocks =
       problem.program().residual_blocks();
diff --git a/internal/ceres/residual_block.h b/internal/ceres/residual_block.h
index 888a44a..80c8dd6 100644
--- a/internal/ceres/residual_block.h
+++ b/internal/ceres/residual_block.h
@@ -77,9 +77,9 @@
 
   // Evaluates the residual term, storing the scalar cost in *cost, the residual
   // components in *residuals, and the jacobians between the parameters and
-  // residuals in jacobians[i], in row-major order. If residuals is NULL, the
-  // residuals are not computed. If jacobians is NULL, no jacobians are
-  // computed. If jacobians[i] is NULL, then the jacobian for that parameter is
+  // residuals in jacobians[i], in row-major order. If residuals is nullptr,
+  // the residuals are not computed. If jacobians is nullptr, no jacobians are
+  // computed. If jacobians[i] is nullptr, the jacobian for that parameter is
   // not computed.
   //
   // cost must not be null.
diff --git a/internal/ceres/residual_block_test.cc b/internal/ceres/residual_block_test.cc
index d587058..f01d842 100644
--- a/internal/ceres/residual_block_test.cc
+++ b/internal/ceres/residual_block_test.cc
@@ -64,7 +64,7 @@
     }
     if (jacobians) {
       for (int k = 0; k < 3; ++k) {
-        if (jacobians[k] != NULL) {
+        if (jacobians[k] != nullptr) {
           MatrixRef jacobian(
               jacobians[k], num_residuals(), parameter_block_sizes()[k]);
           jacobian.setConstant(k);
@@ -96,11 +96,11 @@
   TernaryCostFunction cost_function(3, 2, 3, 4);
 
   // Create the object under tests.
-  ResidualBlock residual_block(&cost_function, NULL, parameters, -1);
+  ResidualBlock residual_block(&cost_function, nullptr, parameters, -1);
 
   // Verify getters.
   EXPECT_EQ(&cost_function, residual_block.cost_function());
-  EXPECT_EQ(NULL, residual_block.loss_function());
+  EXPECT_EQ(nullptr, residual_block.loss_function());
   EXPECT_EQ(parameters[0], residual_block.parameter_blocks()[0]);
   EXPECT_EQ(parameters[1], residual_block.parameter_blocks()[1]);
   EXPECT_EQ(parameters[2], residual_block.parameter_blocks()[2]);
@@ -108,12 +108,12 @@
 
   // Verify cost-only evaluation.
   double cost;
-  residual_block.Evaluate(true, &cost, NULL, NULL, scratch);
+  residual_block.Evaluate(true, &cost, nullptr, nullptr, scratch);
   EXPECT_EQ(0.5 * (0 * 0 + 1 * 1 + 2 * 2), cost);
 
   // Verify cost and residual evaluation.
   double residuals[3];
-  residual_block.Evaluate(true, &cost, residuals, NULL, scratch);
+  residual_block.Evaluate(true, &cost, residuals, nullptr, scratch);
   EXPECT_EQ(0.5 * (0 * 0 + 1 * 1 + 2 * 2), cost);
   EXPECT_EQ(0.0, residuals[0]);
   EXPECT_EQ(1.0, residuals[1]);
@@ -151,7 +151,7 @@
   jacobian_ry.setConstant(-1.0);
   jacobian_rz.setConstant(-1.0);
 
-  jacobian_ptrs[1] = NULL;  // Don't compute the jacobian for y.
+  jacobian_ptrs[1] = nullptr;  // Don't compute the jacobian for y.
 
   residual_block.Evaluate(true, &cost, residuals, jacobian_ptrs, scratch);
   EXPECT_EQ(0.5 * (0 * 0 + 1 * 1 + 2 * 2), cost);
@@ -187,7 +187,7 @@
         //   0 1 2 3 4 ...
         //   0 1 2 3 4 ...
         //
-        if (jacobians[k] != NULL) {
+        if (jacobians[k] != nullptr) {
           MatrixRef jacobian(
               jacobians[k], num_residuals(), parameter_block_sizes()[k]);
           for (int j = 0; j < k + 2; ++j) {
@@ -233,11 +233,11 @@
   LocallyParameterizedCostFunction cost_function;
 
   // Create the object under tests.
-  ResidualBlock residual_block(&cost_function, NULL, parameters, -1);
+  ResidualBlock residual_block(&cost_function, nullptr, parameters, -1);
 
   // Verify getters.
   EXPECT_EQ(&cost_function, residual_block.cost_function());
-  EXPECT_EQ(NULL, residual_block.loss_function());
+  EXPECT_EQ(nullptr, residual_block.loss_function());
   EXPECT_EQ(parameters[0], residual_block.parameter_blocks()[0]);
   EXPECT_EQ(parameters[1], residual_block.parameter_blocks()[1]);
   EXPECT_EQ(parameters[2], residual_block.parameter_blocks()[2]);
@@ -245,12 +245,12 @@
 
   // Verify cost-only evaluation.
   double cost;
-  residual_block.Evaluate(true, &cost, NULL, NULL, scratch);
+  residual_block.Evaluate(true, &cost, nullptr, nullptr, scratch);
   EXPECT_EQ(0.5 * (0 * 0 + 1 * 1 + 2 * 2), cost);
 
   // Verify cost and residual evaluation.
   double residuals[3];
-  residual_block.Evaluate(true, &cost, residuals, NULL, scratch);
+  residual_block.Evaluate(true, &cost, residuals, nullptr, scratch);
   EXPECT_EQ(0.5 * (0 * 0 + 1 * 1 + 2 * 2), cost);
   EXPECT_EQ(0.0, residuals[0]);
   EXPECT_EQ(1.0, residuals[1]);
@@ -311,7 +311,7 @@
   jacobian_ry.setConstant(-1.0);
   jacobian_rz.setConstant(-1.0);
 
-  jacobian_ptrs[1] = NULL;  // Don't compute the jacobian for y.
+  jacobian_ptrs[1] = nullptr;  // Don't compute the jacobian for y.
 
   residual_block.Evaluate(true, &cost, residuals, jacobian_ptrs, scratch);
   EXPECT_EQ(0.5 * (0 * 0 + 1 * 1 + 2 * 2), cost);
diff --git a/internal/ceres/residual_block_utils.cc b/internal/ceres/residual_block_utils.cc
index d5b3fa1..17cf619 100644
--- a/internal/ceres/residual_block_utils.cc
+++ b/internal/ceres/residual_block_utils.cc
@@ -56,7 +56,7 @@
 
   InvalidateArray(1, cost);
   InvalidateArray(num_residuals, residuals);
-  if (jacobians != NULL) {
+  if (jacobians != nullptr) {
     for (int i = 0; i < num_parameter_blocks; ++i) {
       const int parameter_block_size = block.parameter_blocks()[i]->Size();
       InvalidateArray(num_residuals * parameter_block_size, jacobians[i]);
@@ -104,9 +104,9 @@
       StringAppendF(&result, "| ");
       for (int k = 0; k < num_residuals; ++k) {
         AppendArrayToString(1,
-                            (jacobians != NULL && jacobians[i] != NULL)
+                            (jacobians != nullptr && jacobians[i] != nullptr)
                                 ? jacobians[i] + k * parameter_block_size + j
-                                : NULL,
+                                : nullptr,
                             &result);
       }
       StringAppendF(&result, "\n");
@@ -129,7 +129,7 @@
     return false;
   }
 
-  if (jacobians != NULL) {
+  if (jacobians != nullptr) {
     for (int i = 0; i < num_parameter_blocks; ++i) {
       const int parameter_block_size = block.parameter_blocks()[i]->Size();
       if (!IsArrayValid(num_residuals * parameter_block_size, jacobians[i])) {
diff --git a/internal/ceres/residual_block_utils.h b/internal/ceres/residual_block_utils.h
index 41ae81a..1ffff8e 100644
--- a/internal/ceres/residual_block_utils.h
+++ b/internal/ceres/residual_block_utils.h
@@ -52,7 +52,7 @@
 
 class ResidualBlock;
 
-// Invalidate cost, resdual and jacobian arrays (if not NULL).
+// Invalidate cost, residual and jacobian arrays (if not nullptr).
 void InvalidateEvaluation(const ResidualBlock& block,
                           double* cost,
                           double* residuals,
diff --git a/internal/ceres/residual_block_utils_test.cc b/internal/ceres/residual_block_utils_test.cc
index 331f5ab..2254619 100644
--- a/internal/ceres/residual_block_utils_test.cc
+++ b/internal/ceres/residual_block_utils_test.cc
@@ -51,7 +51,7 @@
   std::vector<ParameterBlock*> parameter_blocks;
   parameter_blocks.push_back(&parameter_block);
 
-  ResidualBlock residual_block(&cost_function, NULL, parameter_blocks, -1);
+  ResidualBlock residual_block(&cost_function, nullptr, parameter_blocks, -1);
 
   std::unique_ptr<double[]> scratch(
       new double[residual_block.NumScratchDoublesForEvaluate()]);
@@ -74,7 +74,7 @@
                 double* residuals,
                 double** jacobians) const final {
     residuals[0] = 1;
-    if (jacobians != NULL && jacobians[0] != NULL) {
+    if (jacobians != nullptr && jacobians[0] != nullptr) {
       jacobians[0][0] = 0.0;
     }
     return true;
@@ -90,7 +90,7 @@
                 double** jacobians) const final {
     // Forget to update the residuals.
     // residuals[0] = 1;
-    if (jacobians != NULL && jacobians[0] != NULL) {
+    if (jacobians != nullptr && jacobians[0] != nullptr) {
       jacobians[0][0] = 0.0;
     }
     return true;
@@ -103,7 +103,7 @@
                 double* residuals,
                 double** jacobians) const final {
     residuals[0] = 1;
-    if (jacobians != NULL && jacobians[0] != NULL) {
+    if (jacobians != nullptr && jacobians[0] != nullptr) {
       // Forget to update the jacobians.
       // jacobians[0][0] = 0.0;
     }
@@ -117,7 +117,7 @@
                 double* residuals,
                 double** jacobians) const final {
     residuals[0] = std::numeric_limits<double>::infinity();
-    if (jacobians != NULL && jacobians[0] != NULL) {
+    if (jacobians != nullptr && jacobians[0] != nullptr) {
       jacobians[0][0] = 0.0;
     }
     return true;
@@ -130,7 +130,7 @@
                 double* residuals,
                 double** jacobians) const final {
     residuals[0] = 1.0;
-    if (jacobians != NULL && jacobians[0] != NULL) {
+    if (jacobians != nullptr && jacobians[0] != nullptr) {
       jacobians[0][0] = std::numeric_limits<double>::quiet_NaN();
     }
     return true;
diff --git a/internal/ceres/rotation_test.cc b/internal/ceres/rotation_test.cc
index e28185c..951d7ca 100644
--- a/internal/ceres/rotation_test.cc
+++ b/internal/ceres/rotation_test.cc
@@ -687,7 +687,7 @@
 bool IsClose(double x, double y) {
   EXPECT_FALSE(isnan(x));
   EXPECT_FALSE(isnan(y));
-  return internal::IsClose(x, y, kTolerance, NULL, NULL);
+  return internal::IsClose(x, y, kTolerance, nullptr, nullptr);
 }
 
 }  // namespace
diff --git a/internal/ceres/schur_complement_solver.cc b/internal/ceres/schur_complement_solver.cc
index 57419c2..15c1629 100644
--- a/internal/ceres/schur_complement_solver.cc
+++ b/internal/ceres/schur_complement_solver.cc
@@ -150,9 +150,9 @@
     // mechanism that does not cause binary bloat.
     if (options_.row_block_size == 2 && options_.e_block_size == 3 &&
         options_.f_block_size == 6 && num_f_blocks == 1) {
-      eliminator_.reset(new SchurEliminatorForOneFBlock<2, 3, 6>);
+      eliminator_ = std::make_unique<SchurEliminatorForOneFBlock<2, 3, 6>>();
     } else {
-      eliminator_.reset(SchurEliminatorBase::Create(options_));
+      eliminator_ = SchurEliminatorBase::Create(options_);
     }
 
     CHECK(eliminator_);
@@ -202,8 +202,8 @@
     blocks[j] = bs->cols[i].size;
   }
 
-  set_lhs(new BlockRandomAccessDenseMatrix(blocks));
-  set_rhs(new double[lhs()->num_rows()]);
+  set_lhs(std::make_unique<BlockRandomAccessDenseMatrix>(blocks));
+  set_rhs(std::make_unique<double[]>(lhs()->num_rows()));
 }
 
 // Solve the system Sx = r, assuming that the matrix S is stored in a
@@ -309,8 +309,9 @@
     }
   }
 
-  set_lhs(new BlockRandomAccessSparseMatrix(blocks_, block_pairs));
-  set_rhs(new double[lhs()->num_rows()]);
+  set_lhs(
+      std::make_unique<BlockRandomAccessSparseMatrix>(blocks_, block_pairs));
+  set_rhs(std::make_unique<double[]>(lhs()->num_rows()));
 }
 
 LinearSolver::Summary SparseSchurComplementSolver::SolveReducedLinearSystem(
@@ -335,11 +336,10 @@
   const CompressedRowSparseMatrix::StorageType storage_type =
       sparse_cholesky_->StorageType();
   if (storage_type == CompressedRowSparseMatrix::UPPER_TRIANGULAR) {
-    lhs.reset(CompressedRowSparseMatrix::FromTripletSparseMatrix(*tsm));
+    lhs = CompressedRowSparseMatrix::FromTripletSparseMatrix(*tsm);
     lhs->set_storage_type(CompressedRowSparseMatrix::UPPER_TRIANGULAR);
   } else {
-    lhs.reset(
-        CompressedRowSparseMatrix::FromTripletSparseMatrixTransposed(*tsm));
+    lhs = CompressedRowSparseMatrix::FromTripletSparseMatrixTransposed(*tsm);
     lhs->set_storage_type(CompressedRowSparseMatrix::LOWER_TRIANGULAR);
   }
 
@@ -371,7 +371,8 @@
   CHECK_EQ(options().preconditioner_type, SCHUR_JACOBI);
 
   if (preconditioner_.get() == nullptr) {
-    preconditioner_.reset(new BlockRandomAccessDiagonalMatrix(blocks_));
+    preconditioner_ =
+        std::make_unique<BlockRandomAccessDiagonalMatrix>(blocks_);
   }
 
   BlockRandomAccessSparseMatrix* sc =
@@ -401,10 +402,11 @@
 
   VectorRef(solution, num_rows).setZero();
 
-  std::unique_ptr<LinearOperator> lhs_adapter(
-      new BlockRandomAccessSparseMatrixAdapter(*sc));
-  std::unique_ptr<LinearOperator> preconditioner_adapter(
-      new BlockRandomAccessDiagonalMatrixAdapter(*preconditioner_));
+  std::unique_ptr<LinearOperator> lhs_adapter =
+      std::make_unique<BlockRandomAccessSparseMatrixAdapter>(*sc);
+  std::unique_ptr<LinearOperator> preconditioner_adapter =
+      std::make_unique<BlockRandomAccessDiagonalMatrixAdapter>(
+          *preconditioner_);
 
   LinearSolver::Options cg_options;
   cg_options.min_num_iterations = options().min_num_iterations;
diff --git a/internal/ceres/schur_complement_solver.h b/internal/ceres/schur_complement_solver.h
index b7cb1d9..3d343bf 100644
--- a/internal/ceres/schur_complement_solver.h
+++ b/internal/ceres/schur_complement_solver.h
@@ -125,11 +125,13 @@
  protected:
   const LinearSolver::Options& options() const { return options_; }
 
-  void set_lhs(BlockRandomAccessMatrix* lhs) { lhs_.reset(lhs); }
+  void set_lhs(std::unique_ptr<BlockRandomAccessMatrix> lhs) {
+    lhs_ = std::move(lhs);
+  }
   const BlockRandomAccessMatrix* lhs() const { return lhs_.get(); }
   BlockRandomAccessMatrix* mutable_lhs() { return lhs_.get(); }
 
-  void set_rhs(double* rhs) { rhs_.reset(rhs); }
+  void set_rhs(std::unique_ptr<double[]> rhs) { rhs_ = std::move(rhs); }
   const double* rhs() const { return rhs_.get(); }
 
  private:
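As an aside, the hunk above changes set_lhs()/set_rhs() to take std::unique_ptr by value. A minimal standalone sketch of that ownership-transferring setter pattern (all names here are invented for illustration, not Ceres code):

```c++
#include <memory>
#include <utility>

class Widget {
 public:
  // Taking the smart pointer by value forces the caller to hand over
  // ownership explicitly (via std::move or a temporary).
  void set_buffer(std::unique_ptr<double[]> buffer) {
    buffer_ = std::move(buffer);  // Any previously held buffer is freed here.
  }
  const double* buffer() const { return buffer_.get(); }

 private:
  std::unique_ptr<double[]> buffer_;
};

int main() {
  Widget w;
  w.set_buffer(std::make_unique<double[]>(16));  // Ownership moves into w.
  return w.buffer() != nullptr ? 0 : 1;
}
```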
diff --git a/internal/ceres/schur_complement_solver_test.cc b/internal/ceres/schur_complement_solver_test.cc
index 550733e..e943ed1 100644
--- a/internal/ceres/schur_complement_solver_test.cc
+++ b/internal/ceres/schur_complement_solver_test.cc
@@ -51,13 +51,13 @@
 class SchurComplementSolverTest : public ::testing::Test {
  protected:
   void SetUpFromProblemId(int problem_id) {
-    std::unique_ptr<LinearLeastSquaresProblem> problem(
-        CreateLinearLeastSquaresProblemFromId(problem_id));
+    std::unique_ptr<LinearLeastSquaresProblem> problem =
+        CreateLinearLeastSquaresProblemFromId(problem_id);
 
     CHECK(problem != nullptr);
     A.reset(down_cast<BlockSparseMatrix*>(problem->A.release()));
-    b.reset(problem->b.release());
-    D.reset(problem->D.release());
+    b = std::move(problem->b);
+    D = std::move(problem->D);
 
     num_cols = A->num_cols();
     num_rows = A->num_rows();
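The `b = std::move(problem->b)` lines above replace release()/reset() pairs with a single move assignment. A hedged, self-contained sketch of that idiom using invented types (not the actual Ceres classes):

```c++
#include <cassert>
#include <memory>

struct Problem {
  std::unique_ptr<double[]> b;
  std::unique_ptr<double[]> D;
};

int main() {
  Problem problem;
  problem.b = std::make_unique<double[]>(8);

  // Move assignment transfers ownership in one step; no release()/reset().
  std::unique_ptr<double[]> b = std::move(problem.b);
  assert(b != nullptr);
  assert(problem.b == nullptr);  // The source member is left empty.
  return 0;
}
```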
diff --git a/internal/ceres/schur_eliminator.cc b/internal/ceres/schur_eliminator.cc
index 613ae95..bcffbe6 100644
--- a/internal/ceres/schur_eliminator.cc
+++ b/internal/ceres/schur_eliminator.cc
@@ -45,115 +45,116 @@
 namespace ceres {
 namespace internal {
 
-SchurEliminatorBase* SchurEliminatorBase::Create(
+std::unique_ptr<SchurEliminatorBase> SchurEliminatorBase::Create(
     const LinearSolver::Options& options) {
 #ifndef CERES_RESTRICT_SCHUR_SPECIALIZATION
   if ((options.row_block_size == 2) &&
      (options.e_block_size == 2) &&
      (options.f_block_size == 2)) {
-    return new SchurEliminator<2, 2, 2>(options);
+    return std::make_unique<SchurEliminator<2, 2, 2>>(options);
   }
   if ((options.row_block_size == 2) &&
      (options.e_block_size == 2) &&
      (options.f_block_size == 3)) {
-    return new SchurEliminator<2, 2, 3>(options);
+    return std::make_unique<SchurEliminator<2, 2, 3>>(options);
   }
   if ((options.row_block_size == 2) &&
      (options.e_block_size == 2) &&
      (options.f_block_size == 4)) {
-    return new SchurEliminator<2, 2, 4>(options);
+    return std::make_unique<SchurEliminator<2, 2, 4>>(options);
   }
   if ((options.row_block_size == 2) &&
      (options.e_block_size == 2)) {
-    return new SchurEliminator<2, 2, Eigen::Dynamic>(options);
+    return std::make_unique<SchurEliminator<2, 2, Eigen::Dynamic>>(options);
   }
   if ((options.row_block_size == 2) &&
      (options.e_block_size == 3) &&
      (options.f_block_size == 3)) {
-    return new SchurEliminator<2, 3, 3>(options);
+    return std::make_unique<SchurEliminator<2, 3, 3>>(options);
   }
   if ((options.row_block_size == 2) &&
      (options.e_block_size == 3) &&
      (options.f_block_size == 4)) {
-    return new SchurEliminator<2, 3, 4>(options);
+    return std::make_unique<SchurEliminator<2, 3, 4>>(options);
   }
   if ((options.row_block_size == 2) &&
      (options.e_block_size == 3) &&
      (options.f_block_size == 6)) {
-    return new SchurEliminator<2, 3, 6>(options);
+    return std::make_unique<SchurEliminator<2, 3, 6>>(options);
   }
   if ((options.row_block_size == 2) &&
      (options.e_block_size == 3) &&
      (options.f_block_size == 9)) {
-    return new SchurEliminator<2, 3, 9>(options);
+    return std::make_unique<SchurEliminator<2, 3, 9>>(options);
   }
   if ((options.row_block_size == 2) &&
      (options.e_block_size == 3)) {
-    return new SchurEliminator<2, 3, Eigen::Dynamic>(options);
+    return std::make_unique<SchurEliminator<2, 3, Eigen::Dynamic>>(options);
   }
   if ((options.row_block_size == 2) &&
      (options.e_block_size == 4) &&
      (options.f_block_size == 3)) {
-    return new SchurEliminator<2, 4, 3>(options);
+    return std::make_unique<SchurEliminator<2, 4, 3>>(options);
   }
   if ((options.row_block_size == 2) &&
      (options.e_block_size == 4) &&
      (options.f_block_size == 4)) {
-    return new SchurEliminator<2, 4, 4>(options);
+    return std::make_unique<SchurEliminator<2, 4, 4>>(options);
   }
   if ((options.row_block_size == 2) &&
      (options.e_block_size == 4) &&
      (options.f_block_size == 6)) {
-    return new SchurEliminator<2, 4, 6>(options);
+    return std::make_unique<SchurEliminator<2, 4, 6>>(options);
   }
   if ((options.row_block_size == 2) &&
      (options.e_block_size == 4) &&
      (options.f_block_size == 8)) {
-    return new SchurEliminator<2, 4, 8>(options);
+    return std::make_unique<SchurEliminator<2, 4, 8>>(options);
   }
   if ((options.row_block_size == 2) &&
      (options.e_block_size == 4) &&
      (options.f_block_size == 9)) {
-    return new SchurEliminator<2, 4, 9>(options);
+    return std::make_unique<SchurEliminator<2, 4, 9>>(options);
   }
   if ((options.row_block_size == 2) &&
      (options.e_block_size == 4)) {
-    return new SchurEliminator<2, 4, Eigen::Dynamic>(options);
+    return std::make_unique<SchurEliminator<2, 4, Eigen::Dynamic>>(options);
   }
   if (options.row_block_size == 2) {
-    return new SchurEliminator<2, Eigen::Dynamic, Eigen::Dynamic>(options);
+    return std::make_unique<SchurEliminator<2, Eigen::Dynamic, Eigen::Dynamic>>(options);
   }
   if ((options.row_block_size == 3) &&
      (options.e_block_size == 3) &&
      (options.f_block_size == 3)) {
-    return new SchurEliminator<3, 3, 3>(options);
+    return std::make_unique<SchurEliminator<3, 3, 3>>(options);
   }
   if ((options.row_block_size == 4) &&
      (options.e_block_size == 4) &&
      (options.f_block_size == 2)) {
-    return new SchurEliminator<4, 4, 2>(options);
+    return std::make_unique<SchurEliminator<4, 4, 2>>(options);
   }
   if ((options.row_block_size == 4) &&
      (options.e_block_size == 4) &&
      (options.f_block_size == 3)) {
-    return new SchurEliminator<4, 4, 3>(options);
+    return std::make_unique<SchurEliminator<4, 4, 3>>(options);
   }
   if ((options.row_block_size == 4) &&
      (options.e_block_size == 4) &&
      (options.f_block_size == 4)) {
-    return new SchurEliminator<4, 4, 4>(options);
+    return std::make_unique<SchurEliminator<4, 4, 4>>(options);
   }
   if ((options.row_block_size == 4) &&
      (options.e_block_size == 4)) {
-    return new SchurEliminator<4, 4, Eigen::Dynamic>(options);
+    return std::make_unique<SchurEliminator<4, 4, Eigen::Dynamic>>(options);
   }
 
 #endif
   VLOG(1) << "Template specializations not found for <"
           << options.row_block_size << "," << options.e_block_size << ","
           << options.f_block_size << ">";
-  return new SchurEliminator<Eigen::Dynamic, Eigen::Dynamic, Eigen::Dynamic>(
-      options);
+  return std::make_unique<SchurEliminator<Eigen::Dynamic,
+                                          Eigen::Dynamic,
+                                          Eigen::Dynamic>>(options);
 }
 
 }  // namespace internal
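The factory above now returns std::unique_ptr<SchurEliminatorBase> instead of a raw pointer. A minimal sketch of a factory of that shape, with invented types purely for illustration: std::make_unique<Derived>() converts implicitly to std::unique_ptr<Base>, so no bare `new` is needed.

```c++
#include <iostream>
#include <memory>

struct Eliminator {
  virtual ~Eliminator() = default;
  virtual int BlockSize() const = 0;
};

template <int kSize>
struct FixedEliminator : Eliminator {
  int BlockSize() const override { return kSize; }
};

// Returns the specialization matching block_size, or a generic fallback.
std::unique_ptr<Eliminator> CreateEliminator(int block_size) {
  if (block_size == 2) return std::make_unique<FixedEliminator<2>>();
  if (block_size == 3) return std::make_unique<FixedEliminator<3>>();
  return std::make_unique<FixedEliminator<0>>();
}

int main() {
  std::unique_ptr<Eliminator> e = CreateEliminator(3);
  std::cout << e->BlockSize() << "\n";  // Prints 3.
  return 0;
}
```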
diff --git a/internal/ceres/schur_eliminator.h b/internal/ceres/schur_eliminator.h
index a9eada5..cf7a3e4 100644
--- a/internal/ceres/schur_eliminator.h
+++ b/internal/ceres/schur_eliminator.h
@@ -209,7 +209,8 @@
                               const double* z,
                               double* y) = 0;
   // Factory
-  static SchurEliminatorBase* Create(const LinearSolver::Options& options);
+  static std::unique_ptr<SchurEliminatorBase> Create(
+      const LinearSolver::Options& options);
 };
 
 // Templated implementation of the SchurEliminatorBase interface. The
@@ -483,7 +484,7 @@
       Eigen::Matrix<double, kFBlockSize, 1> f_t_b;
 
       // Add the square of the diagonal to e_t_e.
-      if (D != NULL) {
+      if (D != nullptr) {
         const typename EigenTypes<kEBlockSize>::ConstVectorRef diag(
             D + bs->cols[e_block_id].position, kEBlockSize);
         e_t_e = diag.array().square().matrix().asDiagonal();
diff --git a/internal/ceres/schur_eliminator_benchmark.cc b/internal/ceres/schur_eliminator_benchmark.cc
index 73bdfd9..76ccfaa 100644
--- a/internal/ceres/schur_eliminator_benchmark.cc
+++ b/internal/ceres/schur_eliminator_benchmark.cc
@@ -88,7 +88,7 @@
       }
     }
 
-    matrix_.reset(new BlockSparseMatrix(bs));
+    matrix_ = std::make_unique<BlockSparseMatrix>(bs);
     double* values = matrix_->mutable_values();
     for (int i = 0; i < matrix_->num_nonzeros(); ++i) {
       values[i] = RandNormal();
@@ -98,7 +98,7 @@
     b_.setRandom();
 
     std::vector<int> blocks(1, kFBlockSize);
-    lhs_.reset(new BlockRandomAccessDenseMatrix(blocks));
+    lhs_ = std::make_unique<BlockRandomAccessDenseMatrix>(blocks);
     diagonal_.resize(matrix_->num_cols());
     diagonal_.setOnes();
     rhs_.resize(kFBlockSize);
@@ -192,7 +192,8 @@
   }
 }
 
-static void BM_SchurEliminatorForOneFBlockBackSubstitute(benchmark::State& state) {
+static void BM_SchurEliminatorForOneFBlockBackSubstitute(
+    benchmark::State& state) {
   const int num_e_blocks = state.range(0);
   BenchmarkData data(num_e_blocks);
   SchurEliminatorForOneFBlock<2, 3, 6> eliminator;
diff --git a/internal/ceres/schur_eliminator_impl.h b/internal/ceres/schur_eliminator_impl.h
index 9b2ff0e..271d4cb 100644
--- a/internal/ceres/schur_eliminator_impl.h
+++ b/internal/ceres/schur_eliminator_impl.h
@@ -159,12 +159,13 @@
 
   uneliminated_row_begins_ = chunk.start + chunk.size;
 
-  buffer_.reset(new double[buffer_size_ * num_threads_]);
+  buffer_ = std::make_unique<double[]>(buffer_size_ * num_threads_);
 
   // chunk_outer_product_buffer_ only needs to store e_block_size *
   // f_block_size, which is always less than buffer_size_, so we just
   // allocate buffer_size_ per thread.
-  chunk_outer_product_buffer_.reset(new double[buffer_size_ * num_threads_]);
+  chunk_outer_product_buffer_ =
+      std::make_unique<double[]>(buffer_size_ * num_threads_);
 
   STLDeleteElements(&rhs_locks_);
   rhs_locks_.resize(num_col_blocks - num_eliminate_blocks_);
@@ -191,7 +192,7 @@
   const int num_col_blocks = bs->cols.size();
 
   // Add the diagonal to the schur complement.
-  if (D != NULL) {
+  if (D != nullptr) {
     ParallelFor(context_,
                 num_eliminate_blocks_,
                 num_col_blocks,
@@ -201,7 +202,7 @@
                   int r, c, row_stride, col_stride;
                   CellInfo* cell_info = lhs->GetCell(
                       block_id, block_id, &r, &c, &row_stride, &col_stride);
-                  if (cell_info != NULL) {
+                  if (cell_info != nullptr) {
                     const int block_size = bs->cols[i].size;
                     typename EigenTypes<Eigen::Dynamic>::ConstVectorRef diag(
                         D + bs->cols[i].position, block_size);
@@ -243,7 +244,7 @@
         typename EigenTypes<kEBlockSize, kEBlockSize>::Matrix ete(e_block_size,
                                                                   e_block_size);
 
-        if (D != NULL) {
+        if (D != nullptr) {
           const typename EigenTypes<kEBlockSize>::ConstVectorRef diag(
               D + bs->cols[e_block_id].position, e_block_size);
           ete = diag.array().square().matrix().asDiagonal();
@@ -325,7 +326,7 @@
 
     typename EigenTypes<kEBlockSize, kEBlockSize>::Matrix ete(e_block_size,
                                                               e_block_size);
-    if (D != NULL) {
+    if (D != nullptr) {
       const typename EigenTypes<kEBlockSize>::ConstVectorRef diag(
           D + bs->cols[e_block_id].position, e_block_size);
       ete = diag.array().square().matrix().asDiagonal();
@@ -547,7 +548,7 @@
       int r, c, row_stride, col_stride;
       CellInfo* cell_info =
           lhs->GetCell(block1, block2, &r, &c, &row_stride, &col_stride);
-      if (cell_info != NULL) {
+      if (cell_info != nullptr) {
         const int block2_size = bs->cols[it2->first].size;
         std::lock_guard<std::mutex> l(cell_info->m);
         // clang-format off
@@ -625,7 +626,7 @@
     int r, c, row_stride, col_stride;
     CellInfo* cell_info =
         lhs->GetCell(block1, block1, &r, &c, &row_stride, &col_stride);
-    if (cell_info != NULL) {
+    if (cell_info != nullptr) {
       std::lock_guard<std::mutex> l(cell_info->m);
       // This multiply currently ignores the fact that this is a
       // symmetric outer product.
@@ -645,7 +646,7 @@
       int r, c, row_stride, col_stride;
       CellInfo* cell_info =
           lhs->GetCell(block1, block2, &r, &c, &row_stride, &col_stride);
-      if (cell_info != NULL) {
+      if (cell_info != nullptr) {
         const int block2_size = bs->cols[row.cells[j].block_id].size;
         std::lock_guard<std::mutex> l(cell_info->m);
         // clang-format off
@@ -680,7 +681,7 @@
     int r, c, row_stride, col_stride;
     CellInfo* cell_info =
         lhs->GetCell(block1, block1, &r, &c, &row_stride, &col_stride);
-    if (cell_info != NULL) {
+    if (cell_info != nullptr) {
       std::lock_guard<std::mutex> l(cell_info->m);
       // block += b1.transpose() * b1;
       // clang-format off
@@ -700,7 +701,7 @@
       int r, c, row_stride, col_stride;
       CellInfo* cell_info =
           lhs->GetCell(block1, block2, &r, &c, &row_stride, &col_stride);
-      if (cell_info != NULL) {
+      if (cell_info != nullptr) {
         // block += b1.transpose() * b2;
         std::lock_guard<std::mutex> l(cell_info->m);
         // clang-format off
diff --git a/internal/ceres/schur_eliminator_template.py b/internal/ceres/schur_eliminator_template.py
index 5051595..a6a8d56 100644
--- a/internal/ceres/schur_eliminator_template.py
+++ b/internal/ceres/schur_eliminator_template.py
@@ -130,20 +130,21 @@
 namespace ceres {
 namespace internal {
 
-SchurEliminatorBase* SchurEliminatorBase::Create(
+std::unique_ptr<SchurEliminatorBase> SchurEliminatorBase::Create(
     const LinearSolver::Options& options) {
 #ifndef CERES_RESTRICT_SCHUR_SPECIALIZATION
 """
 
-FACTORY = """  return new SchurEliminator<%s, %s, %s>(options);"""
+FACTORY = """  return std::make_unique<SchurEliminator<%s, %s, %s>>(options);"""
 
 FACTORY_FOOTER = """
 #endif
   VLOG(1) << "Template specializations not found for <"
           << options.row_block_size << "," << options.e_block_size << ","
           << options.f_block_size << ">";
-  return new SchurEliminator<Eigen::Dynamic, Eigen::Dynamic, Eigen::Dynamic>(
-      options);
+  return std::make_unique<SchurEliminator<Eigen::Dynamic,
+                                          Eigen::Dynamic,
+                                          Eigen::Dynamic>>(options);
 }
 
 }  // namespace internal
diff --git a/internal/ceres/schur_eliminator_test.cc b/internal/ceres/schur_eliminator_test.cc
index 6383ced..f586476 100644
--- a/internal/ceres/schur_eliminator_test.cc
+++ b/internal/ceres/schur_eliminator_test.cc
@@ -57,16 +57,16 @@
 class SchurEliminatorTest : public ::testing::Test {
  protected:
   void SetUpFromId(int id) {
-    std::unique_ptr<LinearLeastSquaresProblem> problem(
-        CreateLinearLeastSquaresProblemFromId(id));
+    std::unique_ptr<LinearLeastSquaresProblem> problem =
+        CreateLinearLeastSquaresProblemFromId(id);
     CHECK(problem != nullptr);
     SetupHelper(problem.get());
   }
 
   void SetupHelper(LinearLeastSquaresProblem* problem) {
     A.reset(down_cast<BlockSparseMatrix*>(problem->A.release()));
-    b.reset(problem->b.release());
-    D.reset(problem->D.release());
+    b = std::move(problem->b);
+    D = std::move(problem->D);
 
     num_eliminate_blocks = problem->num_eliminate_blocks;
     num_eliminate_cols = 0;
@@ -150,8 +150,8 @@
                       &options.f_block_size);
     }
 
-    std::unique_ptr<SchurEliminatorBase> eliminator;
-    eliminator.reset(SchurEliminatorBase::Create(options));
+    std::unique_ptr<SchurEliminatorBase> eliminator =
+        SchurEliminatorBase::Create(options);
     const bool kFullRankETE = true;
     eliminator->Init(num_eliminate_blocks, kFullRankETE, A->block_structure());
     eliminator->Eliminate(
diff --git a/internal/ceres/schur_jacobi_preconditioner.cc b/internal/ceres/schur_jacobi_preconditioner.cc
index 283cd4b..ffab66f 100644
--- a/internal/ceres/schur_jacobi_preconditioner.cc
+++ b/internal/ceres/schur_jacobi_preconditioner.cc
@@ -51,14 +51,14 @@
   const int num_blocks = bs.cols.size() - options_.elimination_groups[0];
   CHECK_GT(num_blocks, 0) << "Jacobian should have at least 1 f_block for "
                           << "SCHUR_JACOBI preconditioner.";
-  CHECK(options_.context != NULL);
+  CHECK(options_.context != nullptr);
 
   std::vector<int> blocks(num_blocks);
   for (int i = 0; i < num_blocks; ++i) {
     blocks[i] = bs.cols[i + options_.elimination_groups[0]].size;
   }
 
-  m_.reset(new BlockRandomAccessDiagonalMatrix(blocks));
+  m_ = std::make_unique<BlockRandomAccessDiagonalMatrix>(blocks);
   InitEliminator(bs);
 }
 
@@ -74,7 +74,7 @@
   eliminator_options.f_block_size = options_.f_block_size;
   eliminator_options.row_block_size = options_.row_block_size;
   eliminator_options.context = options_.context;
-  eliminator_.reset(SchurEliminatorBase::Create(eliminator_options));
+  eliminator_ = SchurEliminatorBase::Create(eliminator_options);
   const bool kFullRankETE = true;
   eliminator_->Init(
       eliminator_options.elimination_groups[0], kFullRankETE, &bs);
diff --git a/internal/ceres/schur_jacobi_preconditioner.h b/internal/ceres/schur_jacobi_preconditioner.h
index 7333988..81f584b 100644
--- a/internal/ceres/schur_jacobi_preconditioner.h
+++ b/internal/ceres/schur_jacobi_preconditioner.h
@@ -69,7 +69,7 @@
 //   options.elimination_groups.push_back(num_cameras);
 //   SchurJacobiPreconditioner preconditioner(
 //      *A.block_structure(), options);
-//   preconditioner.Update(A, NULL);
+//   preconditioner.Update(A, nullptr);
 //   preconditioner.RightMultiply(x, y);
 //
 class SchurJacobiPreconditioner : public BlockSparseMatrixPreconditioner {
diff --git a/internal/ceres/scratch_evaluate_preparer.cc b/internal/ceres/scratch_evaluate_preparer.cc
index d8fee30..a59694e 100644
--- a/internal/ceres/scratch_evaluate_preparer.cc
+++ b/internal/ceres/scratch_evaluate_preparer.cc
@@ -49,7 +49,8 @@
 }
 
 void ScratchEvaluatePreparer::Init(int max_derivatives_per_residual_block) {
-  jacobian_scratch_.reset(new double[max_derivatives_per_residual_block]);
+  jacobian_scratch_ =
+      std::make_unique<double[]>(max_derivatives_per_residual_block);
 }
 
 // Point the jacobian blocks into the scratch area of this evaluate preparer.
@@ -64,7 +65,7 @@
     const ParameterBlock* parameter_block =
         residual_block->parameter_blocks()[j];
     if (parameter_block->IsConstant()) {
-      jacobians[j] = NULL;
+      jacobians[j] = nullptr;
     } else {
       jacobians[j] = jacobian_block_cursor;
       jacobian_block_cursor += num_residuals * parameter_block->TangentSize();
diff --git a/internal/ceres/solver.cc b/internal/ceres/solver.cc
index 491f066..1d2420c 100644
--- a/internal/ceres/solver.cc
+++ b/internal/ceres/solver.cc
@@ -367,7 +367,7 @@
                                  &(summary->inner_iteration_ordering_used));
 
   // clang-format off
-  summary->inner_iterations_used          = pp.inner_iteration_minimizer.get() != NULL;     // NOLINT
+  summary->inner_iterations_used          = pp.inner_iteration_minimizer.get() != nullptr;     // NOLINT
   summary->linear_solver_type_used        = pp.linear_solver_options.type;
   summary->num_threads_used               = pp.options.num_threads;
   summary->preconditioner_type_used       = pp.options.preconditioner_type;
@@ -375,7 +375,7 @@
 
   internal::SetSummaryFinalCost(summary);
 
-  if (pp.reduced_program.get() != NULL) {
+  if (pp.reduced_program.get() != nullptr) {
     SummarizeReducedProgram(*pp.reduced_program, summary);
   }
 
@@ -385,7 +385,7 @@
   // case if the preprocessor failed, or if the reduced problem did
   // not contain any parameter blocks. Thus, only extract the
   // evaluator statistics if one exists.
-  if (pp.evaluator.get() != NULL) {
+  if (pp.evaluator.get() != nullptr) {
     const map<string, CallStatistics>& evaluator_statistics =
         pp.evaluator->Statistics();
     {
@@ -407,7 +407,7 @@
   // Again, like the evaluator, there may or may not be a linear
   // solver from which we can extract run time statistics. In
   // particular the line search solver does not use a linear solver.
-  if (pp.linear_solver.get() != NULL) {
+  if (pp.linear_solver.get() != nullptr) {
     const map<string, CallStatistics>& linear_solver_statistics =
         pp.linear_solver->Statistics();
     const CallStatistics& call_stats = FindWithDefault(
@@ -518,11 +518,11 @@
   Solver::Options modified_options = options;
   if (options.check_gradients) {
     modified_options.callbacks.push_back(&gradient_checking_callback);
-    gradient_checking_problem.reset(CreateGradientCheckingProblemImpl(
+    gradient_checking_problem = CreateGradientCheckingProblemImpl(
         problem_impl,
         options.gradient_check_numeric_derivative_relative_step_size,
         options.gradient_check_relative_precision,
-        &gradient_checking_callback));
+        &gradient_checking_callback);
     problem_impl = gradient_checking_problem.get();
     program = problem_impl->mutable_program();
   }
diff --git a/internal/ceres/sparse_normal_cholesky_solver.cc b/internal/ceres/sparse_normal_cholesky_solver.cc
index 1ffce8f..2e52ae6 100644
--- a/internal/ceres/sparse_normal_cholesky_solver.cc
+++ b/internal/ceres/sparse_normal_cholesky_solver.cc
@@ -75,21 +75,21 @@
   A->LeftMultiply(b, rhs_.data());
   event_logger.AddEvent("Compute RHS");
 
-  if (per_solve_options.D != NULL) {
+  if (per_solve_options.D != nullptr) {
     // Temporarily append a diagonal block to the A matrix, but undo
     // it before returning the matrix to the user.
-    std::unique_ptr<BlockSparseMatrix> regularizer;
-    regularizer.reset(BlockSparseMatrix::CreateDiagonalMatrix(
-        per_solve_options.D, A->block_structure()->cols));
+    std::unique_ptr<BlockSparseMatrix> regularizer =
+        BlockSparseMatrix::CreateDiagonalMatrix(per_solve_options.D,
+                                                A->block_structure()->cols);
     event_logger.AddEvent("Diagonal");
     A->AppendRows(*regularizer);
     event_logger.AddEvent("Append");
   }
   event_logger.AddEvent("Append Rows");
 
-  if (inner_product_computer_.get() == NULL) {
-    inner_product_computer_.reset(
-        InnerProductComputer::Create(*A, sparse_cholesky_->StorageType()));
+  if (inner_product_computer_.get() == nullptr) {
+    inner_product_computer_ =
+        InnerProductComputer::Create(*A, sparse_cholesky_->StorageType());
 
     event_logger.AddEvent("InnerProductComputer::Create");
   }
@@ -97,7 +97,7 @@
   inner_product_computer_->Compute();
   event_logger.AddEvent("InnerProductComputer::Compute");
 
-  if (per_solve_options.D != NULL) {
+  if (per_solve_options.D != nullptr) {
     A->DeleteRowBlocks(A->block_structure()->cols.size());
   }
 
diff --git a/internal/ceres/sparse_normal_cholesky_solver_test.cc b/internal/ceres/sparse_normal_cholesky_solver_test.cc
index 8acb98e..2b00dcd 100644
--- a/internal/ceres/sparse_normal_cholesky_solver_test.cc
+++ b/internal/ceres/sparse_normal_cholesky_solver_test.cc
@@ -54,20 +54,20 @@
 class SparseNormalCholeskySolverTest : public ::testing::Test {
  protected:
   void SetUp() final {
-    std::unique_ptr<LinearLeastSquaresProblem> problem(
-        CreateLinearLeastSquaresProblemFromId(2));
+    std::unique_ptr<LinearLeastSquaresProblem> problem =
+        CreateLinearLeastSquaresProblemFromId(2);
 
     CHECK(problem != nullptr);
     A_.reset(down_cast<BlockSparseMatrix*>(problem->A.release()));
-    b_.reset(problem->b.release());
-    D_.reset(problem->D.release());
+    b_ = std::move(problem->b);
+    D_ = std::move(problem->D);
   }
 
   void TestSolver(const LinearSolver::Options& options, double* D) {
     Matrix dense_A;
     A_->ToDenseMatrix(&dense_A);
     Matrix lhs = dense_A.transpose() * dense_A;
-    if (D != NULL) {
+    if (D != nullptr) {
       lhs += (ConstVectorRef(D, A_->num_cols()).array() *
               ConstVectorRef(D, A_->num_cols()).array())
                  .matrix()
@@ -97,7 +97,7 @@
   }
 
   void TestSolver(const LinearSolver::Options& options) {
-    TestSolver(options, NULL);
+    TestSolver(options, nullptr);
     TestSolver(options, D_.get());
   }
 
diff --git a/internal/ceres/stl_util.h b/internal/ceres/stl_util.h
index d3411b7..2af2518 100644
--- a/internal/ceres/stl_util.h
+++ b/internal/ceres/stl_util.h
@@ -73,7 +73,7 @@
 // hash_set, or any other STL container which defines sensible begin(), end(),
 // and clear() methods.
 //
-// If container is NULL, this function is a no-op.
+// If container is nullptr, this function is a no-op.
 //
 // As an alternative to calling STLDeleteElements() directly, consider
 // ElementDeleter (defined below), which ensures that your container's elements
diff --git a/internal/ceres/stringprintf.cc b/internal/ceres/stringprintf.cc
index b0e2acc..def40fe 100644
--- a/internal/ceres/stringprintf.cc
+++ b/internal/ceres/stringprintf.cc
@@ -66,7 +66,7 @@
     // Error or MSVC running out of space.  MSVC 8.0 and higher
     // can be asked about space needed with the special idiom below:
     va_copy(backup_ap, ap);
-    result = vsnprintf(NULL, 0, format, backup_ap);
+    result = vsnprintf(nullptr, 0, format, backup_ap);
     va_end(backup_ap);
 #endif
 
diff --git a/internal/ceres/subset_preconditioner.cc b/internal/ceres/subset_preconditioner.cc
index e5146ce..b6b0c2e 100644
--- a/internal/ceres/subset_preconditioner.cc
+++ b/internal/ceres/subset_preconditioner.cc
@@ -73,7 +73,7 @@
   //     [Q]
 
   // Now add D to A if needed.
-  if (D != NULL) {
+  if (D != nullptr) {
     // A = [P]
     //     [Q]
     //     [D]
@@ -82,19 +82,19 @@
     m->AppendRows(*regularizer);
   }
 
-  if (inner_product_computer_.get() == NULL) {
-    inner_product_computer_.reset(InnerProductComputer::Create(
+  if (inner_product_computer_ == nullptr) {
+    inner_product_computer_ = InnerProductComputer::Create(
         *m,
         options_.subset_preconditioner_start_row_block,
         bs->rows.size(),
-        sparse_cholesky_->StorageType()));
+        sparse_cholesky_->StorageType());
   }
 
   // Compute inner_product = [Q'*Q + D'*D]
   inner_product_computer_->Compute();
 
   // Unappend D if needed.
-  if (D != NULL) {
+  if (D != nullptr) {
     // A = [P]
     //     [Q]
     m->DeleteRowBlocks(bs->cols.size());
diff --git a/internal/ceres/subset_preconditioner_test.cc b/internal/ceres/subset_preconditioner_test.cc
index 202110b..7d606f1 100644
--- a/internal/ceres/subset_preconditioner_test.cc
+++ b/internal/ceres/subset_preconditioner_test.cc
@@ -99,28 +99,28 @@
     options.max_row_block_size = 4;
     options.block_density = 0.9;
 
-    m_.reset(BlockSparseMatrix::CreateRandomMatrix(options));
+    m_ = BlockSparseMatrix::CreateRandomMatrix(options);
     start_row_block_ = m_->block_structure()->rows.size();
 
     // Ensure that the bottom part of the matrix has the same column
     // block structure.
     options.col_blocks = m_->block_structure()->cols;
-    b_.reset(BlockSparseMatrix::CreateRandomMatrix(options));
+    b_ = BlockSparseMatrix::CreateRandomMatrix(options);
     m_->AppendRows(*b_);
 
     // Create an identity block diagonal matrix with the same column
     // block structure.
     diagonal_ = Vector::Ones(m_->num_cols());
-    block_diagonal_.reset(BlockSparseMatrix::CreateDiagonalMatrix(
-        diagonal_.data(), b_->block_structure()->cols));
+    block_diagonal_ = BlockSparseMatrix::CreateDiagonalMatrix(
+        diagonal_.data(), b_->block_structure()->cols);
 
     // Unconditionally add the block diagonal to the matrix b_,
     // because either it is part of b_ to make it full rank, or
     // we pass the same diagonal matrix later as the parameter D. In
     // either case the preconditioner matrix is b_' b + D'D.
     b_->AppendRows(*block_diagonal_);
-    inner_product_computer_.reset(InnerProductComputer::Create(
-        *b_, CompressedRowSparseMatrix::UPPER_TRIANGULAR));
+    inner_product_computer_ = InnerProductComputer::Create(
+        *b_, CompressedRowSparseMatrix::UPPER_TRIANGULAR);
     inner_product_computer_->Compute();
   }
 
@@ -138,7 +138,7 @@
   Preconditioner::Options options;
   options.subset_preconditioner_start_row_block = start_row_block_;
   options.sparse_linear_algebra_library_type = ::testing::get<0>(param);
-  preconditioner_.reset(new SubsetPreconditioner(options, *m_));
+  preconditioner_ = std::make_unique<SubsetPreconditioner>(options, *m_);
 
   const bool with_diagonal = ::testing::get<1>(param);
   if (!with_diagonal) {
@@ -146,7 +146,7 @@
   }
 
   EXPECT_TRUE(
-      preconditioner_->Update(*m_, with_diagonal ? diagonal_.data() : NULL));
+      preconditioner_->Update(*m_, with_diagonal ? diagonal_.data() : nullptr));
 
   // Repeatedly apply the preconditioner to random vectors and check
   // that the preconditioned value is the same as one obtained by
diff --git a/internal/ceres/suitesparse.cc b/internal/ceres/suitesparse.cc
index 0d6f6bd..ce8ac65 100644
--- a/internal/ceres/suitesparse.cc
+++ b/internal/ceres/suitesparse.cc
@@ -368,7 +368,7 @@
 LinearSolverTerminationType SuiteSparseCholesky::Factorize(
     CompressedRowSparseMatrix* lhs, string* message) {
   if (lhs == nullptr) {
-    *message = "Failure: Input lhs is NULL.";
+    *message = "Failure: Input lhs is nullptr.";
     return LINEAR_SOLVER_FATAL_ERROR;
   }
 
diff --git a/internal/ceres/suitesparse.h b/internal/ceres/suitesparse.h
index a98c946..8802774 100644
--- a/internal/ceres/suitesparse.h
+++ b/internal/ceres/suitesparse.h
@@ -106,7 +106,7 @@
   cholmod_dense CreateDenseVectorView(const double* x, int size);
 
   // Given a vector x, build a cholmod_dense vector of size out_size
-  // with the first in_size entries copied from x. If x is NULL, then
+  // with the first in_size entries copied from x. If x is nullptr, then
   // an all zeros vector is returned. Caller owns the result.
   cholmod_dense* CreateDenseVector(const double* x, int in_size, int out_size);
 
@@ -123,7 +123,7 @@
   // Create and return a matrix m = A * A'. Caller owns the
   // result. The matrix A is not modified.
   cholmod_sparse* AATranspose(cholmod_sparse* A) {
-    cholmod_sparse* m = cholmod_aat(A, NULL, A->nrow, 1, &cc_);
+    cholmod_sparse* m = cholmod_aat(A, nullptr, A->nrow, 1, &cc_);
     m->stype = 1;  // Pay attention to the upper triangular part.
     return m;
   }
@@ -196,7 +196,7 @@
 
   // Given a Cholesky factorization of a matrix A = LL^T, solve the
   // linear system Ax = b, and return the result. If the Solve fails
-  // NULL is returned. Caller owns the result.
+  // nullptr is returned. Caller owns the result.
   //
   // message contains an explanation of the failures if any.
   cholmod_dense* Solve(cholmod_factor* L,
diff --git a/internal/ceres/system_test.cc b/internal/ceres/system_test.cc
index 11b0504..3f4450a 100644
--- a/internal/ceres/system_test.cc
+++ b/internal/ceres/system_test.cc
@@ -70,13 +70,13 @@
     x_[3] = 1.0;
 
     problem_.AddResidualBlock(
-        new AutoDiffCostFunction<F1, 1, 1, 1>(new F1), NULL, &x_[0], &x_[1]);
+        new AutoDiffCostFunction<F1, 1, 1, 1>(new F1), nullptr, &x_[0], &x_[1]);
     problem_.AddResidualBlock(
-        new AutoDiffCostFunction<F2, 1, 1, 1>(new F2), NULL, &x_[2], &x_[3]);
+        new AutoDiffCostFunction<F2, 1, 1, 1>(new F2), nullptr, &x_[2], &x_[3]);
     problem_.AddResidualBlock(
-        new AutoDiffCostFunction<F3, 1, 1, 1>(new F3), NULL, &x_[1], &x_[2]);
+        new AutoDiffCostFunction<F3, 1, 1, 1>(new F3), nullptr, &x_[1], &x_[2]);
     problem_.AddResidualBlock(
-        new AutoDiffCostFunction<F4, 1, 1, 1>(new F4), NULL, &x_[0], &x_[3]);
+        new AutoDiffCostFunction<F4, 1, 1, 1>(new F4), nullptr, &x_[0], &x_[3]);
 
     // Settings for the reference solution.
     options_.linear_solver_type = ceres::DENSE_QR;
diff --git a/internal/ceres/tiny_solver_cost_function_adapter_test.cc b/internal/ceres/tiny_solver_cost_function_adapter_test.cc
index 6f57193..ff8c070 100644
--- a/internal/ceres/tiny_solver_cost_function_adapter_test.cc
+++ b/internal/ceres/tiny_solver_cost_function_adapter_test.cc
@@ -85,8 +85,8 @@
   double* parameters[1] = {xyz};
 
   // Check that residual only evaluation works.
-  cost_function->Evaluate(parameters, expected_residuals.data(), NULL);
-  cfa(xyz, actual_residuals.data(), NULL);
+  cost_function->Evaluate(parameters, expected_residuals.data(), nullptr);
+  cfa(xyz, actual_residuals.data(), nullptr);
   EXPECT_NEAR(
       (expected_residuals - actual_residuals).norm() / actual_residuals.norm(),
       0.0,
diff --git a/internal/ceres/tiny_solver_test.cc b/internal/ceres/tiny_solver_test.cc
index 2e70694..45c021f 100644
--- a/internal/ceres/tiny_solver_test.cc
+++ b/internal/ceres/tiny_solver_test.cc
@@ -115,7 +115,7 @@
 void TestHelper(const Function& f, const Vector& x0) {
   Vector x = x0;
   Vec2 residuals;
-  f(x.data(), residuals.data(), NULL);
+  f(x.data(), residuals.data(), nullptr);
   EXPECT_GT(residuals.squaredNorm() / 2.0, 1e-10);
 
   TinySolver<Function> solver;
diff --git a/internal/ceres/triplet_sparse_matrix.cc b/internal/ceres/triplet_sparse_matrix.cc
index e4f7f9b..c409a59 100644
--- a/internal/ceres/triplet_sparse_matrix.cc
+++ b/internal/ceres/triplet_sparse_matrix.cc
@@ -123,9 +123,12 @@
   // Nothing to do if we have enough space already.
   if (new_max_num_nonzeros <= max_num_nonzeros_) return;
 
-  int* new_rows = new int[new_max_num_nonzeros];
-  int* new_cols = new int[new_max_num_nonzeros];
-  double* new_values = new double[new_max_num_nonzeros];
+  std::unique_ptr<int[]> new_rows =
+      std::make_unique<int[]>(new_max_num_nonzeros);
+  std::unique_ptr<int[]> new_cols =
+      std::make_unique<int[]>(new_max_num_nonzeros);
+  std::unique_ptr<double[]> new_values =
+      std::make_unique<double[]>(new_max_num_nonzeros);
 
   for (int i = 0; i < num_nonzeros_; ++i) {
     new_rows[i] = rows_[i];
@@ -133,10 +136,9 @@
     new_values[i] = values_[i];
   }
 
-  rows_.reset(new_rows);
-  cols_.reset(new_cols);
-  values_.reset(new_values);
-
+  rows_ = std::move(new_rows);
+  cols_ = std::move(new_cols);
+  values_ = std::move(new_values);
   max_num_nonzeros_ = new_max_num_nonzeros;
 }
 
@@ -152,9 +154,9 @@
 }
 
 void TripletSparseMatrix::AllocateMemory() {
-  rows_.reset(new int[max_num_nonzeros_]);
-  cols_.reset(new int[max_num_nonzeros_]);
-  values_.reset(new double[max_num_nonzeros_]);
+  rows_ = std::make_unique<int[]>(max_num_nonzeros_);
+  cols_ = std::make_unique<int[]>(max_num_nonzeros_);
+  values_ = std::make_unique<double[]>(max_num_nonzeros_);
 }
 
 void TripletSparseMatrix::CopyData(const TripletSparseMatrix& orig) {
@@ -252,10 +254,11 @@
   num_nonzeros_ -= dropped_terms;
 }
 
-TripletSparseMatrix* TripletSparseMatrix::CreateSparseDiagonalMatrix(
-    const double* values, int num_rows) {
-  TripletSparseMatrix* m =
-      new TripletSparseMatrix(num_rows, num_rows, num_rows);
+std::unique_ptr<TripletSparseMatrix>
+TripletSparseMatrix::CreateSparseDiagonalMatrix(const double* values,
+                                                int num_rows) {
+  std::unique_ptr<TripletSparseMatrix> m =
+      std::make_unique<TripletSparseMatrix>(num_rows, num_rows, num_rows);
   for (int i = 0; i < num_rows; ++i) {
     m->mutable_rows()[i] = i;
     m->mutable_cols()[i] = i;
@@ -272,7 +275,7 @@
   }
 }
 
-TripletSparseMatrix* TripletSparseMatrix::CreateRandomMatrix(
+std::unique_ptr<TripletSparseMatrix> TripletSparseMatrix::CreateRandomMatrix(
     const TripletSparseMatrix::RandomMatrixOptions& options) {
   CHECK_GT(options.num_rows, 0);
   CHECK_GT(options.num_cols, 0);
@@ -297,7 +300,7 @@
     }
   }
 
-  return new TripletSparseMatrix(
+  return std::make_unique<TripletSparseMatrix>(
       options.num_rows, options.num_cols, rows, cols, values);
 }
 
diff --git a/internal/ceres/triplet_sparse_matrix.h b/internal/ceres/triplet_sparse_matrix.h
index daa4643..ba426c4 100644
--- a/internal/ceres/triplet_sparse_matrix.h
+++ b/internal/ceres/triplet_sparse_matrix.h
@@ -115,8 +115,8 @@
   // Build a sparse diagonal matrix of size num_rows x num_rows from
   // the array values. Entries of the values array are copied into the
   // sparse matrix.
-  static TripletSparseMatrix* CreateSparseDiagonalMatrix(const double* values,
-                                                         int num_rows);
+  static std::unique_ptr<TripletSparseMatrix> CreateSparseDiagonalMatrix(
+      const double* values, int num_rows);
 
   // Options struct to control the generation of random
   // TripletSparseMatrix objects.
@@ -132,9 +132,7 @@
   // Create a random TripletSparseMatrix whose entries are
   // normally distributed and whose structure is determined by
   // RandomMatrixOptions.
-  //
-  // Caller owns the result.
-  static TripletSparseMatrix* CreateRandomMatrix(
+  static std::unique_ptr<TripletSparseMatrix> CreateRandomMatrix(
       const TripletSparseMatrix::RandomMatrixOptions& options);
 
  private:
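The TripletSparseMatrix hunks above swap raw new[]/delete[] arrays for std::unique_ptr<T[]>. A standalone sketch of the same grow-and-swap pattern with an invented IntBuffer class: std::make_unique<T[]>(n) value-initializes the new array, and move assignment releases the old one without a manual delete[].

```c++
#include <algorithm>
#include <memory>

class IntBuffer {
 public:
  explicit IntBuffer(int capacity)
      : capacity_(capacity), data_(std::make_unique<int[]>(capacity)) {}

  void Reserve(int new_capacity) {
    if (new_capacity <= capacity_) return;
    std::unique_ptr<int[]> new_data = std::make_unique<int[]>(new_capacity);
    std::copy(data_.get(), data_.get() + capacity_, new_data.get());
    data_ = std::move(new_data);  // The old array is freed here.
    capacity_ = new_capacity;
  }

  int capacity() const { return capacity_; }

 private:
  int capacity_;
  std::unique_ptr<int[]> data_;
};

int main() {
  IntBuffer buffer(4);
  buffer.Reserve(16);
  return buffer.capacity() == 16 ? 0 : 1;
}
```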
diff --git a/internal/ceres/trust_region_minimizer.cc b/internal/ceres/trust_region_minimizer.cc
index 5e292bb..f8e6b29 100644
--- a/internal/ceres/trust_region_minimizer.cc
+++ b/internal/ceres/trust_region_minimizer.cc
@@ -75,11 +75,11 @@
   // Create the TrustRegionStepEvaluator. The construction needs to be
   // delayed to this point because we need the cost for the starting
   // point to initialize the step evaluator.
-  step_evaluator_.reset(new TrustRegionStepEvaluator(
+  step_evaluator_ = std::make_unique<TrustRegionStepEvaluator>(
       x_cost_,
       options_.use_nonmonotonic_steps
           ? options_.max_consecutive_nonmonotonic_steps
-          : 0));
+          : 0);
 
   while (FinalizeIterationAndCheckIfMinimizerCanContinue()) {
     iteration_start_time_in_secs_ = WallTimeInSeconds();
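The step_evaluator_ hunk above is the core `member.reset(new T(...))` to `member = std::make_unique<T>(...)` rewrite that this change applies throughout. A minimal sketch of the idiom with invented classes (not the Ceres types), kept deliberately small:

```c++
#include <memory>

class StepEvaluator {
 public:
  StepEvaluator(double initial_cost, int max_nonmonotonic_steps)
      : cost_(initial_cost), max_steps_(max_nonmonotonic_steps) {}

 private:
  double cost_;
  int max_steps_;
};

class Minimizer {
 public:
  void Init(double initial_cost, bool nonmonotonic) {
    // Allocation and ownership in one expression; no bare `new`.
    step_evaluator_ =
        std::make_unique<StepEvaluator>(initial_cost, nonmonotonic ? 5 : 0);
  }

 private:
  std::unique_ptr<StepEvaluator> step_evaluator_;
};

int main() {
  Minimizer minimizer;
  minimizer.Init(1.0, true);
  return 0;
}
```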
diff --git a/internal/ceres/trust_region_minimizer_test.cc b/internal/ceres/trust_region_minimizer_test.cc
index 7f2c931..d42aab0 100644
--- a/internal/ceres/trust_region_minimizer_test.cc
+++ b/internal/ceres/trust_region_minimizer_test.cc
@@ -79,10 +79,10 @@
   ~PowellEvaluator2() override = default;
 
   // Implementation of Evaluator interface.
-  SparseMatrix* CreateJacobian() const final {
+  std::unique_ptr<SparseMatrix> CreateJacobian() const final {
     CHECK(col1 || col2 || col3 || col4);
-    DenseSparseMatrix* dense_jacobian =
-        new DenseSparseMatrix(NumResiduals(), NumEffectiveParameters());
+    auto dense_jacobian = std::make_unique<DenseSparseMatrix>(
+        NumResiduals(), NumEffectiveParameters());
     dense_jacobian->SetZero();
     return dense_jacobian;
   }
@@ -119,14 +119,14 @@
 
     VLOG(1) << "Cost: " << *cost;
 
-    if (residuals != NULL) {
+    if (residuals != nullptr) {
       residuals[0] = f1;
       residuals[1] = f2;
       residuals[2] = f3;
       residuals[3] = f4;
     }
 
-    if (jacobian != NULL) {
+    if (jacobian != nullptr) {
       DenseSparseMatrix* dense_jacobian;
       dense_jacobian = down_cast<DenseSparseMatrix*>(jacobian);
       dense_jacobian->SetZero();
@@ -176,7 +176,7 @@
       VLOG(1) << "\n" << jacobian_matrix;
     }
 
-    if (gradient != NULL) {
+    if (gradient != nullptr) {
       int column_index = 0;
       if (col1) {
         gradient[column_index++] = f1 + f4 * sqrt(10.0) * 2.0 * (x1 - x4);
@@ -240,10 +240,9 @@
   minimizer_options.gradient_tolerance = 1e-26;
   minimizer_options.function_tolerance = 1e-26;
   minimizer_options.parameter_tolerance = 1e-26;
-  minimizer_options.evaluator.reset(
-      new PowellEvaluator2<col1, col2, col3, col4>);
-  minimizer_options.jacobian.reset(
-      minimizer_options.evaluator->CreateJacobian());
+  minimizer_options.evaluator =
+      std::make_unique<PowellEvaluator2<col1, col2, col3, col4>>();
+  minimizer_options.jacobian = minimizer_options.evaluator->CreateJacobian();
 
   TrustRegionStrategy::Options trust_region_strategy_options;
   trust_region_strategy_options.trust_region_strategy_type = strategy_type;
@@ -252,8 +251,8 @@
   trust_region_strategy_options.max_radius = 1e20;
   trust_region_strategy_options.min_lm_diagonal = 1e-6;
   trust_region_strategy_options.max_lm_diagonal = 1e32;
-  minimizer_options.trust_region_strategy.reset(
-      TrustRegionStrategy::Create(trust_region_strategy_options));
+  minimizer_options.trust_region_strategy =
+      TrustRegionStrategy::Create(trust_region_strategy_options);
 
   TrustRegionMinimizer minimizer;
   Solver::Summary summary;
@@ -343,12 +342,12 @@
       residuals[0] -= sqrt(length);
     }
 
-    if (jacobians == NULL) {
+    if (jacobians == nullptr) {
       return true;
     }
 
     for (int i = 0; i < num_vertices_; ++i) {
-      if (jacobians[i] != NULL) {
+      if (jacobians[i] != nullptr) {
         int prev = (num_vertices_ + i - 1) % num_vertices_;
         int next = (i + 1) % num_vertices_;
 
@@ -398,7 +397,7 @@
   }
 
   Problem problem;
-  problem.AddResidualBlock(new CurveCostFunction(N, 10.), NULL, y);
+  problem.AddResidualBlock(new CurveCostFunction(N, 10.), nullptr, y);
   Solver::Options options;
   options.linear_solver_type = ceres::DENSE_QR;
   Solver::Summary summary;
@@ -425,7 +424,7 @@
 TEST(TrustRegionMinimizer, GradientToleranceConvergenceUpdatesStep) {
   double x = 5;
   Problem problem;
-  problem.AddResidualBlock(ExpCostFunctor::Create(), NULL, &x);
+  problem.AddResidualBlock(ExpCostFunctor::Create(), nullptr, &x);
   problem.SetParameterLowerBound(&x, 0, 3.0);
   Solver::Options options;
   Solver::Summary summary;
diff --git a/internal/ceres/trust_region_preprocessor.cc b/internal/ceres/trust_region_preprocessor.cc
index fc76369..a6a8b9f 100644
--- a/internal/ceres/trust_region_preprocessor.cc
+++ b/internal/ceres/trust_region_preprocessor.cc
@@ -55,9 +55,10 @@
 
 namespace {
 
-ParameterBlockOrdering* CreateDefaultLinearSolverOrdering(
+std::shared_ptr<ParameterBlockOrdering> CreateDefaultLinearSolverOrdering(
     const Program& program) {
-  ParameterBlockOrdering* ordering = new ParameterBlockOrdering;
+  std::shared_ptr<ParameterBlockOrdering> ordering =
+      std::make_shared<ParameterBlockOrdering>();
   const vector<ParameterBlock*>& parameter_blocks = program.parameter_blocks();
   for (int i = 0; i < parameter_blocks.size(); ++i) {
     ordering->AddElementToGroup(
@@ -160,8 +161,8 @@
     // assume that they are giving all the freedom to us in choosing
     // the best possible ordering. This intent can be indicated by
     // putting all the parameter blocks in the same elimination group.
-    options.linear_solver_ordering.reset(
-        CreateDefaultLinearSolverOrdering(*pp->reduced_program));
+    options.linear_solver_ordering =
+        CreateDefaultLinearSolverOrdering(*pp->reduced_program);
   } else {
     // If the user supplied an ordering, then check if the first
     // elimination group is still non-empty after the reduced problem
@@ -247,7 +248,7 @@
     }
   }
 
-  pp->linear_solver.reset(LinearSolver::Create(pp->linear_solver_options));
+  pp->linear_solver = LinearSolver::Create(pp->linear_solver_options);
   return (pp->linear_solver != nullptr);
 }
 
@@ -269,8 +270,8 @@
   pp->evaluator_options.context = pp->problem->context();
   pp->evaluator_options.evaluation_callback =
       pp->reduced_program->mutable_evaluation_callback();
-  pp->evaluator.reset(Evaluator::Create(
-      pp->evaluator_options, pp->reduced_program.get(), &pp->error));
+  pp->evaluator = Evaluator::Create(
+      pp->evaluator_options, pp->reduced_program.get(), &pp->error);
 
   return (pp->evaluator != nullptr);
 }
@@ -316,12 +317,12 @@
     }
   } else {
     // The user did not supply an ordering, so create one.
-    options.inner_iteration_ordering.reset(
-        CoordinateDescentMinimizer::CreateOrdering(*pp->reduced_program));
+    options.inner_iteration_ordering =
+        CoordinateDescentMinimizer::CreateOrdering(*pp->reduced_program);
   }
 
-  pp->inner_iteration_minimizer.reset(
-      new CoordinateDescentMinimizer(pp->problem->context()));
+  pp->inner_iteration_minimizer =
+      std::make_unique<CoordinateDescentMinimizer>(pp->problem->context());
   return pp->inner_iteration_minimizer->Init(*pp->reduced_program,
                                              pp->problem->parameter_map(),
                                              *options.inner_iteration_ordering,
@@ -335,7 +336,7 @@
   SetupCommonMinimizerOptions(pp);
   pp->minimizer_options.is_constrained =
       pp->reduced_program->IsBoundsConstrained();
-  pp->minimizer_options.jacobian.reset(pp->evaluator->CreateJacobian());
+  pp->minimizer_options.jacobian = pp->evaluator->CreateJacobian();
   pp->minimizer_options.inner_iteration_minimizer =
       pp->inner_iteration_minimizer;
 
@@ -348,8 +349,8 @@
   strategy_options.trust_region_strategy_type =
       options.trust_region_strategy_type;
   strategy_options.dogleg_type = options.dogleg_type;
-  pp->minimizer_options.trust_region_strategy.reset(
-      TrustRegionStrategy::Create(strategy_options));
+  pp->minimizer_options.trust_region_strategy =
+      TrustRegionStrategy::Create(strategy_options);
   CHECK(pp->minimizer_options.trust_region_strategy != nullptr);
 }
 
@@ -370,10 +371,10 @@
     return false;
   }
 
-  pp->reduced_program.reset(program->CreateReducedProgram(
-      &pp->removed_parameter_blocks, &pp->fixed_cost, &pp->error));
+  pp->reduced_program = program->CreateReducedProgram(
+      &pp->removed_parameter_blocks, &pp->fixed_cost, &pp->error);
 
-  if (pp->reduced_program.get() == NULL) {
+  if (pp->reduced_program.get() == nullptr) {
     return false;
   }
 
diff --git a/internal/ceres/trust_region_preprocessor_test.cc b/internal/ceres/trust_region_preprocessor_test.cc
index ee93df3..fbec589 100644
--- a/internal/ceres/trust_region_preprocessor_test.cc
+++ b/internal/ceres/trust_region_preprocessor_test.cc
@@ -225,7 +225,7 @@
 TEST_F(LinearSolverAndEvaluatorCreationTest, SchurTypeSolverWithBadOrdering) {
   Solver::Options options;
   options.linear_solver_type = DENSE_SCHUR;
-  options.linear_solver_ordering.reset(new ParameterBlockOrdering);
+  options.linear_solver_ordering = std::make_shared<ParameterBlockOrdering>();
   options.linear_solver_ordering->AddElementToGroup(&x_, 0);
   options.linear_solver_ordering->AddElementToGroup(&y_, 0);
   options.linear_solver_ordering->AddElementToGroup(&z_, 1);
@@ -238,7 +238,7 @@
 TEST_F(LinearSolverAndEvaluatorCreationTest, SchurTypeSolverWithGoodOrdering) {
   Solver::Options options;
   options.linear_solver_type = DENSE_SCHUR;
-  options.linear_solver_ordering.reset(new ParameterBlockOrdering);
+  options.linear_solver_ordering = std::make_shared<ParameterBlockOrdering>();
   options.linear_solver_ordering->AddElementToGroup(&x_, 0);
   options.linear_solver_ordering->AddElementToGroup(&z_, 0);
   options.linear_solver_ordering->AddElementToGroup(&y_, 1);
@@ -260,7 +260,7 @@
 
   Solver::Options options;
   options.linear_solver_type = DENSE_SCHUR;
-  options.linear_solver_ordering.reset(new ParameterBlockOrdering);
+  options.linear_solver_ordering = std::make_shared<ParameterBlockOrdering>();
   options.linear_solver_ordering->AddElementToGroup(&x_, 0);
   options.linear_solver_ordering->AddElementToGroup(&z_, 0);
   options.linear_solver_ordering->AddElementToGroup(&y_, 1);
@@ -281,7 +281,7 @@
 
   Solver::Options options;
   options.linear_solver_type = DENSE_SCHUR;
-  options.linear_solver_ordering.reset(new ParameterBlockOrdering);
+  options.linear_solver_ordering = std::make_shared<ParameterBlockOrdering>();
   options.linear_solver_ordering->AddElementToGroup(&x_, 0);
   options.linear_solver_ordering->AddElementToGroup(&z_, 0);
   options.linear_solver_ordering->AddElementToGroup(&y_, 1);
@@ -328,7 +328,7 @@
 TEST_F(LinearSolverAndEvaluatorCreationTest, InvalidInnerIterationsOrdering) {
   Solver::Options options;
   options.use_inner_iterations = true;
-  options.inner_iteration_ordering.reset(new ParameterBlockOrdering);
+  options.inner_iteration_ordering = std::make_shared<ParameterBlockOrdering>();
   options.inner_iteration_ordering->AddElementToGroup(&x_, 0);
   options.inner_iteration_ordering->AddElementToGroup(&z_, 0);
   options.inner_iteration_ordering->AddElementToGroup(&y_, 0);
@@ -341,7 +341,7 @@
 TEST_F(LinearSolverAndEvaluatorCreationTest, ValidInnerIterationsOrdering) {
   Solver::Options options;
   options.use_inner_iterations = true;
-  options.inner_iteration_ordering.reset(new ParameterBlockOrdering);
+  options.inner_iteration_ordering = std::make_shared<ParameterBlockOrdering>();
   options.inner_iteration_ordering->AddElementToGroup(&x_, 0);
   options.inner_iteration_ordering->AddElementToGroup(&z_, 0);
   options.inner_iteration_ordering->AddElementToGroup(&y_, 1);
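The ordering hunks above use std::make_shared<ParameterBlockOrdering>() for the shared ordering options. A hedged sketch of the same pattern with a made-up Ordering type, showing a default-ordering factory that returns a std::shared_ptr directly:

```c++
#include <memory>
#include <set>

struct Ordering {
  std::set<int> group0;
};

// Builds a default ordering with every block in a single group.
std::shared_ptr<Ordering> CreateDefaultOrdering(int num_blocks) {
  auto ordering = std::make_shared<Ordering>();
  for (int i = 0; i < num_blocks; ++i) {
    ordering->group0.insert(i);
  }
  return ordering;
}

int main() {
  std::shared_ptr<Ordering> ordering = CreateDefaultOrdering(3);
  return ordering->group0.size() == 3 ? 0 : 1;
}
```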
diff --git a/internal/ceres/trust_region_strategy.cc b/internal/ceres/trust_region_strategy.cc
index 235bf98..c6961a1 100644
--- a/internal/ceres/trust_region_strategy.cc
+++ b/internal/ceres/trust_region_strategy.cc
@@ -40,12 +40,13 @@
 
 TrustRegionStrategy::~TrustRegionStrategy() = default;
 
-TrustRegionStrategy* TrustRegionStrategy::Create(const Options& options) {
+std::unique_ptr<TrustRegionStrategy> TrustRegionStrategy::Create(
+    const Options& options) {
   switch (options.trust_region_strategy_type) {
     case LEVENBERG_MARQUARDT:
-      return new LevenbergMarquardtStrategy(options);
+      return std::make_unique<LevenbergMarquardtStrategy>(options);
     case DOGLEG:
-      return new DoglegStrategy(options);
+      return std::make_unique<DoglegStrategy>(options);
     default:
       LOG(FATAL) << "Unknown trust region strategy: "
                  << options.trust_region_strategy_type;
@@ -53,7 +54,7 @@
 
   LOG(FATAL) << "Unknown trust region strategy: "
              << options.trust_region_strategy_type;
-  return NULL;
+  return nullptr;
 }
 
 }  // namespace internal
diff --git a/internal/ceres/trust_region_strategy.h b/internal/ceres/trust_region_strategy.h
index 176f73a..d520917 100644
--- a/internal/ceres/trust_region_strategy.h
+++ b/internal/ceres/trust_region_strategy.h
@@ -75,7 +75,7 @@
   };
 
   // Factory.
-  static TrustRegionStrategy* Create(const Options& options);
+  static std::unique_ptr<TrustRegionStrategy> Create(const Options& options);
 
   virtual ~TrustRegionStrategy();
 
diff --git a/internal/ceres/visibility.cc b/internal/ceres/visibility.cc
index 82bf6f1..06f31ec 100644
--- a/internal/ceres/visibility.cc
+++ b/internal/ceres/visibility.cc
@@ -81,7 +81,7 @@
 
 WeightedGraph<int>* CreateSchurComplementGraph(
     const vector<set<int>>& visibility) {
-  const time_t start_time = time(NULL);
+  const time_t start_time = time(nullptr);
   // Compute the number of e_blocks/point blocks. Since the visibility
   // set for each e_block/camera contains the set of e_blocks/points
   // visible to it, we find the maximum across all visibility sets.
@@ -146,7 +146,7 @@
     graph->AddEdge(camera1, camera2, weight);
   }
 
-  VLOG(2) << "Schur complement graph time: " << (time(NULL) - start_time);
+  VLOG(2) << "Schur complement graph time: " << (time(nullptr) - start_time);
   return graph;
 }
 
diff --git a/internal/ceres/visibility_based_preconditioner.cc b/internal/ceres/visibility_based_preconditioner.cc
index 566dcfa..3e3604f 100644
--- a/internal/ceres/visibility_based_preconditioner.cc
+++ b/internal/ceres/visibility_based_preconditioner.cc
@@ -80,7 +80,7 @@
   num_blocks_ = bs.cols.size() - options_.elimination_groups[0];
   CHECK_GT(num_blocks_, 0) << "Jacobian should have at least 1 f_block for "
                            << "visibility based preconditioning.";
-  CHECK(options_.context != NULL);
+  CHECK(options_.context != nullptr);
 
   // Vector of camera block sizes
   block_size_.resize(num_blocks_);
@@ -88,7 +88,7 @@
     block_size_[i] = bs.cols[i + options_.elimination_groups[0]].size;
   }
 
-  const time_t start_time = time(NULL);
+  const time_t start_time = time(nullptr);
   switch (options_.type) {
     case CLUSTER_JACOBI:
       ComputeClusterJacobiSparsity(bs);
@@ -99,11 +99,11 @@
     default:
       LOG(FATAL) << "Unknown preconditioner type";
   }
-  const time_t structure_time = time(NULL);
+  const time_t structure_time = time(nullptr);
   InitStorage(bs);
-  const time_t storage_time = time(NULL);
+  const time_t storage_time = time(nullptr);
   InitEliminator(bs);
-  const time_t eliminator_time = time(NULL);
+  const time_t eliminator_time = time(nullptr);
 
   LinearSolver::Options sparse_cholesky_options;
   sparse_cholesky_options.sparse_linear_algebra_library_type =
@@ -118,7 +118,7 @@
   sparse_cholesky_options.use_postordering = true;
   sparse_cholesky_ = SparseCholesky::Create(sparse_cholesky_options);
 
-  const time_t init_time = time(NULL);
+  const time_t init_time = time(nullptr);
   VLOG(2) << "init time: " << init_time - start_time
           << " structure time: " << structure_time - start_time
           << " storage time:" << storage_time - structure_time
@@ -175,7 +175,8 @@
 void VisibilityBasedPreconditioner::InitStorage(
     const CompressedRowBlockStructure& bs) {
   ComputeBlockPairsInPreconditioner(bs);
-  m_.reset(new BlockRandomAccessSparseMatrix(block_size_, block_pairs_));
+  m_ = std::make_unique<BlockRandomAccessSparseMatrix>(block_size_,
+                                                       block_pairs_);
 }
 
 // Call the canonical views algorithm and cluster the cameras based on
@@ -328,7 +329,7 @@
   eliminator_options.f_block_size = options_.f_block_size;
   eliminator_options.row_block_size = options_.row_block_size;
   eliminator_options.context = options_.context;
-  eliminator_.reset(SchurEliminatorBase::Create(eliminator_options));
+  eliminator_ = SchurEliminatorBase::Create(eliminator_options);
   const bool kFullRankETE = true;
   eliminator_->Init(
       eliminator_options.elimination_groups[0], kFullRankETE, &bs);
@@ -337,7 +338,7 @@
 // Update the values of the preconditioner matrix and factorize it.
 bool VisibilityBasedPreconditioner::UpdateImpl(const BlockSparseMatrix& A,
                                                const double* D) {
-  const time_t start_time = time(NULL);
+  const time_t start_time = time(nullptr);
   const int num_rows = m_->num_rows();
   CHECK_GT(num_rows, 0);
 
@@ -375,7 +376,7 @@
     status = Factorize();
   }
 
-  VLOG(2) << "Compute time: " << time(NULL) - start_time;
+  VLOG(2) << "Compute time: " << time(nullptr) - start_time;
   return (status == LINEAR_SOLVER_SUCCESS);
 }
 
@@ -395,7 +396,7 @@
     int r, c, row_stride, col_stride;
     CellInfo* cell_info =
         m_->GetCell(block1, block2, &r, &c, &row_stride, &col_stride);
-    CHECK(cell_info != NULL)
+    CHECK(cell_info != nullptr)
         << "Cell missing for block pair (" << block1 << "," << block2 << ")"
         << " cluster pair (" << cluster_membership_[block1] << " "
         << cluster_membership_[block2] << ")";
@@ -420,11 +421,10 @@
   const CompressedRowSparseMatrix::StorageType storage_type =
       sparse_cholesky_->StorageType();
   if (storage_type == CompressedRowSparseMatrix::UPPER_TRIANGULAR) {
-    lhs.reset(CompressedRowSparseMatrix::FromTripletSparseMatrix(*tsm));
+    lhs = CompressedRowSparseMatrix::FromTripletSparseMatrix(*tsm);
     lhs->set_storage_type(CompressedRowSparseMatrix::UPPER_TRIANGULAR);
   } else {
-    lhs.reset(
-        CompressedRowSparseMatrix::FromTripletSparseMatrixTransposed(*tsm));
+    lhs = CompressedRowSparseMatrix::FromTripletSparseMatrixTransposed(*tsm);
     lhs->set_storage_type(CompressedRowSparseMatrix::LOWER_TRIANGULAR);
   }
 
diff --git a/internal/ceres/visibility_based_preconditioner.h b/internal/ceres/visibility_based_preconditioner.h
index 7146e3f..0da42a2 100644
--- a/internal/ceres/visibility_based_preconditioner.h
+++ b/internal/ceres/visibility_based_preconditioner.h
@@ -122,7 +122,7 @@
 //   options.elimination_groups.push_back(num_cameras);
 //   VisibilityBasedPreconditioner preconditioner(
 //      *A.block_structure(), options);
-//   preconditioner.Update(A, NULL);
+//   preconditioner.Update(A, nullptr);
 //   preconditioner.RightMultiply(x, y);
 class VisibilityBasedPreconditioner : public BlockSparseMatrixPreconditioner {
  public:
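The commented usage example in this header hunk is the only prose-like documentation touched here; the following is a minimal, hedged sketch of that same pattern as a standalone snippet. It assumes the Ceres-internal types referenced by the comment (`Preconditioner::Options`, `BlockSparseMatrix A`) and pre-existing `x`/`y` buffers and camera/point counts; none of this is part of the change itself.

```c++
// Sketch only: expands the usage comment above with the setup it assumes.
// Preconditioner::Options, BlockSparseMatrix, and the x/y buffers are Ceres
// internals; num_points and num_cameras are placeholders.
Preconditioner::Options options;
options.type = CLUSTER_JACOBI;
options.elimination_groups.push_back(num_points);
options.elimination_groups.push_back(num_cameras);
VisibilityBasedPreconditioner preconditioner(*A.block_structure(), options);
preconditioner.Update(A, nullptr);   // nullptr: no per-parameter diagonal D.
preconditioner.RightMultiply(x, y);  // Applies the preconditioner to x, writing y.
```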
diff --git a/internal/ceres/visibility_based_preconditioner_test.cc b/internal/ceres/visibility_based_preconditioner_test.cc
index 10aa619..8fcae50 100644
--- a/internal/ceres/visibility_based_preconditioner_test.cc
+++ b/internal/ceres/visibility_based_preconditioner_test.cc
@@ -67,8 +67,8 @@
 //   void SetUp() {
 //     string input_file = TestFileAbsolutePath("problem-6-1384-000.lsqp");
 
-//     std::unique_ptr<LinearLeastSquaresProblem> problem(
-//         CHECK_NOTNULL(CreateLinearLeastSquaresProblemFromFile(input_file)));
+//     std::unique_ptr<LinearLeastSquaresProblem> problem =
+//         CreateLinearLeastSquaresProblemFromFile(input_file);
 //     A_.reset(down_cast<BlockSparseMatrix*>(problem->A.release()));
 //     b_.reset(problem->b.release());
 //     D_.reset(problem->D.release());
@@ -96,7 +96,8 @@
 //     // conditioned.
 //     VectorRef(D_.get(), num_cols_).setConstant(10.0);
 
-//     schur_complement_.reset(new BlockRandomAccessDenseMatrix(blocks));
+//     schur_complement_ =
+//         std::make_unique<BlockRandomAccessDenseMatrix>(blocks);
 //     Vector rhs(schur_complement_->num_rows());
 
 //     std::unique_ptr<SchurEliminatorBase> eliminator;
@@ -104,7 +105,7 @@
 //     eliminator_options.elimination_groups = options_.elimination_groups;
 //     eliminator_options.num_threads = options_.num_threads;
 
-//     eliminator.reset(SchurEliminatorBase::Create(eliminator_options));
+//     eliminator = SchurEliminatorBase::Create(eliminator_options);
 //     eliminator->Init(num_eliminate_blocks_, bs);
 //     eliminator->Eliminate(A_.get(), b_.get(), D_.get(),
 //                           schur_complement_.get(), rhs.data());
@@ -242,8 +243,9 @@
 
 // TEST_F(VisibilityBasedPreconditionerTest, OneClusterClusterJacobi) {
 //   options_.type = CLUSTER_JACOBI;
-//   preconditioner_.reset(
-//       new VisibilityBasedPreconditioner(*A_->block_structure(), options_));
+//   preconditioner_ =
+//       std::make_unique<VisibilityBasedPreconditioner>(
+//          *A_->block_structure(), options_);
 
 //   // Override the clustering to be a single clustering containing all
 //   // the cameras.
@@ -287,8 +289,9 @@
 
 // TEST_F(VisibilityBasedPreconditionerTest, ClusterJacobi) {
 //   options_.type = CLUSTER_JACOBI;
-//   preconditioner_.reset(
-//       new VisibilityBasedPreconditioner(*A_->block_structure(), options_));
+//   preconditioner_ =
+//       std::make_unique<VisibilityBasedPreconditioner>(
+//          *A_->block_structure(), options_);
 
 //   // Override the clustering to be equal number of cameras.
 //   vector<int>& cluster_membership = *get_mutable_cluster_membership();
@@ -312,8 +315,9 @@
 
 // TEST_F(VisibilityBasedPreconditionerTest, ClusterTridiagonal) {
 //   options_.type = CLUSTER_TRIDIAGONAL;
-//   preconditioner_.reset(
-//       new VisibilityBasedPreconditioner(*A_->block_structure(), options_));
+//   preconditioner_ =
+//       std::make_unique<VisibilityBasedPreconditioner>(
+//          *A_->block_structure(), options_);
 //   static const int kNumClusters = 3;
 
 //   // Override the clustering to be 3 clusters.
diff --git a/internal/ceres/wall_time.cc b/internal/ceres/wall_time.cc
index 7163927..bd920ac 100644
--- a/internal/ceres/wall_time.cc
+++ b/internal/ceres/wall_time.cc
@@ -58,7 +58,7 @@
          static_cast<double>(frequency.QuadPart);
 #else
   timeval time_val;
-  gettimeofday(&time_val, NULL);
+  gettimeofday(&time_val, nullptr);
   return (time_val.tv_sec + time_val.tv_usec * 1e-6);
 #endif
 #endif