[base/allocator] Refactor the initialization lock.
Since it's used in more than one place, use a Scoper object to handle
it. Also switch to the same CAS primitive as in spinning_mutex.h for
consistency.

Bug: 998048
Change-Id: I3024b1f97043341828b13701491d89fdeb703488
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/3100110
Reviewed-by: Bartek Nowierski <bartekn@chromium.org>
Commit-Queue: Benoit L <lizeb@chromium.org>
Cr-Commit-Position: refs/heads/main@{#913782}
Benoit Lize authored and Chromium LUCI CQ committed Aug 20, 2021
1 parent b04f70f commit 1ff87d0
Showing 1 changed file with 24 additions and 23 deletions.
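
Before the diff, a minimal, self-contained sketch of the pattern this change introduces: an RAII "scoper" that acquires a spin lock with a weak CAS in its constructor and releases it in its destructor, so every return path unlocks automatically. The SimpleScopedSpinLocker below mirrors the class added in the diff; the GetOrCreate() caller is purely hypothetical and only illustrates why the scoper removes the need for manual unlocks before early returns.

// Minimal sketch (not the Chromium code verbatim): RAII spin lock acquired
// with a weak CAS, matching the shape of the SimpleScopedSpinLocker added in
// the diff below.
#include <atomic>

class SimpleScopedSpinLocker {
 public:
  explicit SimpleScopedSpinLocker(std::atomic<bool>& lock) : lock_(lock) {
    bool expected = false;
    // Weak CAS is fine in a retry loop; relaxed ordering on failure, because
    // a failed attempt does not need to order any other memory accesses.
    while (!lock_.compare_exchange_weak(expected, true,
                                        std::memory_order_acquire,
                                        std::memory_order_relaxed)) {
      expected = false;
    }
  }

  ~SimpleScopedSpinLocker() { lock_.store(false, std::memory_order_release); }

 private:
  std::atomic<bool>& lock_;
};

// Hypothetical caller (not part of this CL): with the scoper, the early
// return no longer needs a manual unlock before it.
int* GetOrCreate(std::atomic<int*>& slot, std::atomic<bool>& lock) {
  SimpleScopedSpinLocker scoped_lock{lock};
  int* value = slot.load(std::memory_order_relaxed);
  if (value)
    return value;  // Unlocked automatically by ~SimpleScopedSpinLocker().
  value = new int(42);
  slot.store(value, std::memory_order_release);
  return value;
}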
@@ -35,6 +35,27 @@ using base::allocator::AllocatorDispatch;
 
 namespace {
 
+class SimpleScopedSpinLocker {
+ public:
+  explicit SimpleScopedSpinLocker(std::atomic<bool>& lock) : lock_(lock) {
+    // Lock. Semantically equivalent to base::Lock::Acquire().
+    bool expected = false;
+    // Weak CAS since we are in a retry loop, relaxed ordering for failure since
+    // in this case we don't imply any ordering.
+    //
+    // This matches partition_allocator/spinning_mutex.h fast path on Linux.
+    while (!lock_.compare_exchange_weak(
+        expected, true, std::memory_order_acquire, std::memory_order_relaxed)) {
+      expected = false;
+    }
+  }
+
+  ~SimpleScopedSpinLocker() { lock_.store(false, std::memory_order_release); }
+
+ private:
+  std::atomic<bool>& lock_;
+};
+
 // We can't use a "static local" or a base::LazyInstance, as:
 // - static local variables call into the runtime on Windows, which is not
 //   prepared to handle it, as the first allocation happens during CRT init.
@@ -57,19 +78,11 @@ class LeakySingleton {
 
   // Replaces the instance pointer with a new one.
   void Replace(T* new_instance) {
-    // Lock. Semantically equivalent to base::Lock::Acquire().
-    bool expected = false;
-    while (!initialization_lock_.compare_exchange_strong(
-        expected, true, std::memory_order_acquire, std::memory_order_acquire)) {
-      expected = false;
-    }
+    SimpleScopedSpinLocker scoped_lock{initialization_lock_};
 
     // Modify under the lock to avoid race between |if (instance)| and
     // |instance_.store()| in GetSlowPath().
     instance_.store(new_instance, std::memory_order_release);
-
-    // Unlock.
-    initialization_lock_.store(false, std::memory_order_release);
   }
 
  private:
@@ -99,28 +112,16 @@ T* LeakySingleton<T, Constructor>::GetSlowPath() {
   // However, we don't want to use a base::Lock here, so instead we use
   // compare-and-exchange on a lock variable, which provides the same
   // guarantees.
-  //
-  // Lock. Semantically equivalent to base::Lock::Acquire().
-  bool expected = false;
-  while (!initialization_lock_.compare_exchange_strong(
-      expected, true, std::memory_order_acquire, std::memory_order_acquire)) {
-    expected = false;
-  }
+  SimpleScopedSpinLocker scoped_lock{initialization_lock_};
 
   T* instance = instance_.load(std::memory_order_relaxed);
   // Someone beat us.
-  if (instance) {
-    // Unlock.
-    initialization_lock_.store(false, std::memory_order_release);
+  if (instance)
     return instance;
-  }
 
   instance = Constructor::New(reinterpret_cast<void*>(instance_buffer_));
   instance_.store(instance, std::memory_order_release);
 
-  // Unlock.
-  initialization_lock_.store(false, std::memory_order_release);
-
   return instance;
 }

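For context on how GetSlowPath() fits into the lock-free fast path, here is a self-contained sketch of the overall double-checked-locking shape. The fast path is unchanged by this CL and not shown in the diff, so everything below (Widget, the global names, the exact orderings of the fast path) is an assumption for illustration, reusing the SimpleScopedSpinLocker sketched above.

// Self-contained sketch (assumptions, not the Chromium code): lock-free
// acquire load on the fast path, spin lock + re-check + release store on the
// slow path, constructing the object in static storage so it is never
// destroyed ("leaky").
#include <atomic>
#include <new>

struct Widget {
  int value = 0;
};

std::atomic<Widget*> g_instance{nullptr};
std::atomic<bool> g_initialization_lock{false};
alignas(Widget) char g_instance_buffer[sizeof(Widget)];

Widget* GetWidget() {
  // Fast path: acquire pairs with the release store below, so a non-null
  // pointer implies a fully constructed Widget.
  Widget* instance = g_instance.load(std::memory_order_acquire);
  if (instance)
    return instance;

  // Slow path: take the spin lock (SimpleScopedSpinLocker as sketched above),
  // then re-check under the lock in case another thread beat us to it.
  SimpleScopedSpinLocker scoped_lock{g_initialization_lock};
  instance = g_instance.load(std::memory_order_relaxed);
  if (instance)
    return instance;

  instance = new (g_instance_buffer) Widget();
  g_instance.store(instance, std::memory_order_release);
  return instance;
}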
