diff --git a/api/test/common/spinlock_benchmark.cc b/api/test/common/spinlock_benchmark.cc
index 95c3f7c7af..28692bee56 100644
--- a/api/test/common/spinlock_benchmark.cc
+++ b/api/test/common/spinlock_benchmark.cc
@@ -100,32 +100,25 @@ static void BM_ProcYieldSpinLockThrashing(benchmark::State &s)
       [](SpinLockMutex &m) { m.unlock(); });
 }
 
-// SpinLock thrashing with thread::yield() after N spins.
+// SpinLock thrashing with thread::yield().
 static void BM_ThreadYieldSpinLockThrashing(benchmark::State &s)
 {
-  std::atomic<bool> mutex(false);
-  SpinThrash<std::atomic<bool>>(
+  std::atomic_flag mutex = ATOMIC_FLAG_INIT;
+  SpinThrash<std::atomic_flag>(
       s, mutex,
-      [](std::atomic<bool> &l) {
-        if (!l.exchange(true, std::memory_order_acq_rel))
+      [](std::atomic_flag &l) {
+        uint32_t try_count = 0;
+        while (l.test_and_set(std::memory_order_acq_rel))
         {
-          return;
-        }
-        for (std::size_t i = 0; i < 128; ++i)
-        {
-          if (!l.load(std::memory_order_acquire) && !l.exchange(true, std::memory_order_acq_rel))
-          {
-            return;
-          }
-
-          if (i % 32 == 0)
+          ++try_count;
+          if (try_count % 32)
           {
             std::this_thread::yield();
           }
         }
         std::this_thread::yield();
       },
-      [](std::atomic<bool> &l) { l.store(false, std::memory_order_release); });
+      [](std::atomic_flag &l) { l.clear(std::memory_order_release); });
 }
 
 // Run the benchmarks at 2x thread/core and measure the amount of time to thrash around.