Convert std::unique_lock to std::lock_guard where the extra capabilities of unique_lock (deferred/early unlock, condition-variable waits, lock ownership transfer) are not used; lock_guard expresses the simple scoped-lock intent and avoids unique_lock's ownership-tracking overhead. Tested by compiling for CXX threads, OpenMP, no threads, and TBB. Change-Id: If1ba5cfce83e2ad4e1015354ce67f5b23e89101f
diff --git a/internal/ceres/concurrent_queue.h b/internal/ceres/concurrent_queue.h index c4e076f..52e2903 100644 --- a/internal/ceres/concurrent_queue.h +++ b/internal/ceres/concurrent_queue.h
@@ -83,7 +83,7 @@ // Atomically push an element onto the queue. If a thread was waiting for an // element, wake it up. void Push(const T& value) { - std::unique_lock<std::mutex> lock(mutex_); + std::lock_guard<std::mutex> lock(mutex_); queue_.push(value); work_pending_condition_.notify_one(); } @@ -93,7 +93,7 @@ bool Pop(T* value) { CHECK(value != nullptr); - std::unique_lock<std::mutex> lock(mutex_); + std::lock_guard<std::mutex> lock(mutex_); return PopUnlocked(value); } @@ -114,14 +114,14 @@ // exit Wait() without getting a value. All future Wait requests will return // immediately if no element is present until EnableWaiters is called. void StopWaiters() { - std::unique_lock<std::mutex> lock(mutex_); + std::lock_guard<std::mutex> lock(mutex_); wait_ = false; work_pending_condition_.notify_all(); } // Enable threads to block on Wait calls. void EnableWaiters() { - std::unique_lock<std::mutex> lock(mutex_); + std::lock_guard<std::mutex> lock(mutex_); wait_ = true; }
diff --git a/internal/ceres/concurrent_queue_test.cc b/internal/ceres/concurrent_queue_test.cc index 3b15c4b..698966a 100644 --- a/internal/ceres/concurrent_queue_test.cc +++ b/internal/ceres/concurrent_queue_test.cc
@@ -189,7 +189,7 @@ std::thread thread([&]() { { - std::unique_lock<std::mutex> lock(mutex); + std::lock_guard<std::mutex> lock(mutex); waiting = true; } @@ -197,7 +197,7 @@ bool valid = queue.Wait(&element); { - std::unique_lock<std::mutex> lock(mutex); + std::lock_guard<std::mutex> lock(mutex); waiting = false; value = element; valid_value = valid; @@ -209,7 +209,7 @@ // Ensure nothing is has been popped off the queue { - std::unique_lock<std::mutex> lock(mutex); + std::lock_guard<std::mutex> lock(mutex); EXPECT_TRUE(waiting); ASSERT_FALSE(valid_value); ASSERT_EQ(0, value); @@ -234,7 +234,7 @@ auto task = [&]() { { - std::unique_lock<std::mutex> lock(mutex); + std::lock_guard<std::mutex> lock(mutex); waiting = true; } @@ -242,7 +242,7 @@ bool valid = queue.Wait(&element); { - std::unique_lock<std::mutex> lock(mutex); + std::lock_guard<std::mutex> lock(mutex); waiting = false; value = element; valid_value = valid; @@ -256,7 +256,7 @@ // Ensure the thread is waiting. { - std::unique_lock<std::mutex> lock(mutex); + std::lock_guard<std::mutex> lock(mutex); EXPECT_TRUE(waiting); } @@ -286,7 +286,7 @@ // Ensure nothing is popped off the queue. { - std::unique_lock<std::mutex> lock(mutex); + std::lock_guard<std::mutex> lock(mutex); EXPECT_TRUE(waiting); ASSERT_FALSE(valid_value); ASSERT_EQ(0, value);
diff --git a/internal/ceres/parallel_for_cxx.cc b/internal/ceres/parallel_for_cxx.cc index 3da5a87..20a689d 100644 --- a/internal/ceres/parallel_for_cxx.cc +++ b/internal/ceres/parallel_for_cxx.cc
@@ -60,7 +60,7 @@ // Increment the number of jobs that have finished and signal the blocking // thread if all jobs have finished. void Finished() { - std::unique_lock<std::mutex> lock(mutex_); + std::lock_guard<std::mutex> lock(mutex_); ++num_finished_; CHECK_LE(num_finished_, num_total_); if (num_finished_ == num_total_) { @@ -196,7 +196,7 @@ { // Get the next available chunk of work to be performed. If there is no // work, return false. - std::unique_lock<std::mutex> lock(shared_state->mutex_i); + std::lock_guard<std::mutex> lock(shared_state->mutex_i); if (shared_state->i >= shared_state->num_work_items) { return false; }
diff --git a/internal/ceres/thread_pool.cc b/internal/ceres/thread_pool.cc index 9c7bb89..8fc7f83 100644 --- a/internal/ceres/thread_pool.cc +++ b/internal/ceres/thread_pool.cc
@@ -62,7 +62,7 @@ } ThreadPool::~ThreadPool() { - std::unique_lock<std::mutex> lock(thread_pool_mutex_); + std::lock_guard<std::mutex> lock(thread_pool_mutex_); // Signal the thread workers to stop and wait for them to finish all scheduled // tasks. Stop(); @@ -72,7 +72,7 @@ } void ThreadPool::Resize(int num_threads) { - std::unique_lock<std::mutex> lock(thread_pool_mutex_); + std::lock_guard<std::mutex> lock(thread_pool_mutex_); const int num_current_threads = thread_pool_.size(); if (num_current_threads >= num_threads) { @@ -92,7 +92,7 @@ } int ThreadPool::Size() { - std::unique_lock<std::mutex> lock(thread_pool_mutex_); + std::lock_guard<std::mutex> lock(thread_pool_mutex_); return thread_pool_.size(); }
diff --git a/internal/ceres/thread_pool_test.cc b/internal/ceres/thread_pool_test.cc index 5485fe4..2b1bf87 100644 --- a/internal/ceres/thread_pool_test.cc +++ b/internal/ceres/thread_pool_test.cc
@@ -59,7 +59,7 @@ for (int i = 0; i < num_tasks; ++i) { thread_pool.AddTask([&]() { - std::unique_lock<std::mutex> lock(mutex); + std::lock_guard<std::mutex> lock(mutex); ++value; condition.notify_all(); }); @@ -96,7 +96,7 @@ auto task = [&]() { // This will block until the mutex is released inside the condition // variable. - std::unique_lock<std::mutex> lock(mutex); + std::lock_guard<std::mutex> lock(mutex); ++value; condition.notify_all(); }; @@ -150,7 +150,7 @@ thread_pool.AddTask([&]() { // This will block until the mutex is released inside the condition // variable. - std::unique_lock<std::mutex> lock(mutex); + std::lock_guard<std::mutex> lock(mutex); ++value; condition.notify_all(); });