diff --git a/quantum/impl/quantum_read_write_mutex_impl.h b/quantum/impl/quantum_read_write_mutex_impl.h
index 0998628..3031975 100644
--- a/quantum/impl/quantum_read_write_mutex_impl.h
+++ b/quantum/impl/quantum_read_write_mutex_impl.h
@@ -91,7 +91,8 @@ void ReadWriteMutex::upgradeToWrite()
 inline
 void ReadWriteMutex::upgradeToWrite(ICoroSync::Ptr sync)
 {
-    while (!tryUpgradeToWrite())
+    bool pendingUpgrade = false;
+    while (!tryUpgradeToWriteImpl(&pendingUpgrade))
     {
         yield(sync);
     }
@@ -100,7 +101,19 @@ void ReadWriteMutex::upgradeToWrite(ICoroSync::Ptr sync)
 inline
 bool ReadWriteMutex::tryUpgradeToWrite()
 {
-    bool rc = _spinlock.tryUpgradeToWrite();
+    return tryUpgradeToWriteImpl(nullptr);
+}
+
+inline
+bool ReadWriteMutex::tryUpgradeToWriteImpl(bool* pendingUpgrade)
+{
+    bool rc;
+    if (pendingUpgrade) {
+        rc = _spinlock.tryUpgradeToWrite(*pendingUpgrade);
+    }
+    else {
+        rc = _spinlock.tryUpgradeToWrite();
+    }
     if (rc)
     {
         _taskId = local::taskId();
@@ -119,7 +132,6 @@ void ReadWriteMutex::unlockWrite()
 {
     assert(_taskId == local::taskId());
     _taskId = TaskId{}; //reset the task id
-    _spinlock.unlockWrite();
 }
diff --git a/quantum/impl/quantum_read_write_spinlock_impl.h b/quantum/impl/quantum_read_write_spinlock_impl.h
index 4ab0886..86390c3 100644
--- a/quantum/impl/quantum_read_write_spinlock_impl.h
+++ b/quantum/impl/quantum_read_write_spinlock_impl.h
@@ -71,25 +71,25 @@ namespace quantum {
 inline
 void ReadWriteSpinLock::lockRead()
 {
-    SpinLockUtil::lockRead(_count);
+    SpinLockUtil::lockRead(_count, LockTraits::Attempt::Unlimited);
 }
 
 inline
 void ReadWriteSpinLock::lockWrite()
 {
-    SpinLockUtil::lockWrite(_count);
+    SpinLockUtil::lockWrite(_count, LockTraits::Attempt::Unlimited);
 }
 
 inline
 bool ReadWriteSpinLock::tryLockRead()
 {
-    return SpinLockUtil::lockRead(_count, true);
+    return SpinLockUtil::lockRead(_count, LockTraits::Attempt::Once);
 }
 
 inline
 bool ReadWriteSpinLock::tryLockWrite()
 {
-    return SpinLockUtil::lockWrite(_count, true);
+    return SpinLockUtil::lockWrite(_count, LockTraits::Attempt::Once);
 }
 
 inline
@@ -107,13 +107,19 @@ void ReadWriteSpinLock::unlockWrite()
 inline
 void ReadWriteSpinLock::upgradeToWrite()
 {
-    SpinLockUtil::upgradeToWrite(_count);
+    SpinLockUtil::upgradeToWrite(_count, LockTraits::Attempt::Unlimited);
 }
 
 inline
 bool ReadWriteSpinLock::tryUpgradeToWrite()
 {
-    return SpinLockUtil::upgradeToWrite(_count, true);
+    return SpinLockUtil::upgradeToWrite(_count, LockTraits::Attempt::Once);
+}
+
+inline
+bool ReadWriteSpinLock::tryUpgradeToWrite(bool& pendingUpgrade)
+{
+    return SpinLockUtil::upgradeToWrite(_count, pendingUpgrade, LockTraits::Attempt::Reentrant);
 }
 
 inline
diff --git a/quantum/impl/quantum_spinlock_impl.h b/quantum/impl/quantum_spinlock_impl.h
index 48db5a3..34fd717 100644
--- a/quantum/impl/quantum_spinlock_impl.h
+++ b/quantum/impl/quantum_spinlock_impl.h
@@ -31,13 +31,13 @@ namespace quantum {
 inline
 void SpinLock::lock()
 {
-    SpinLockUtil::lockWrite(_flag);
+    SpinLockUtil::lockWrite(_flag, LockTraits::Attempt::Unlimited);
 }
 
 inline
 bool SpinLock::tryLock()
 {
-    return SpinLockUtil::lockWrite(_flag, true);
+    return SpinLockUtil::lockWrite(_flag, LockTraits::Attempt::Once);
 }
 
 inline
diff --git a/quantum/quantum_read_write_mutex.h b/quantum/quantum_read_write_mutex.h
index 4a7414a..fb78e96 100644
--- a/quantum/quantum_read_write_mutex.h
+++ b/quantum/quantum_read_write_mutex.h
@@ -194,6 +194,8 @@ class ReadWriteMutex
     };
 
 private:
+    bool tryUpgradeToWriteImpl(bool* pendingUpgrade);
+
     // Members
     mutable ReadWriteSpinLock   _spinlock;
     mutable TaskId              _taskId;
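Note: upgradeToWrite(ICoroSync::Ptr) now drives tryUpgradeToWriteImpl() with a pendingUpgrade flag that survives each yield(sync), so a coroutine that has already traded its read count for a pending-upgrade slot is not treated as a brand-new upgrader on the next attempt. A minimal usage sketch of the coroutine-side call (hypothetical task body with illustrative names; only upgradeToWrite(sync) and unlockWrite() are taken from this diff):

```cpp
#include <quantum/quantum.h>

using namespace Bloomberg::quantum;

// Hypothetical coroutine: read the shared value, then upgrade in place to modify it.
// upgradeToWrite(ctx) yields back to the dispatcher between attempts instead of
// spinning the whole thread, and keeps its pending-upgrade state across yields.
int readThenModify(CoroContext<int>::Ptr ctx, ReadWriteMutex& mutex, int& shared)
{
    mutex.lockRead();
    int snapshot = shared;        // read section
    mutex.upgradeToWrite(ctx);    // cooperative upgrade; may yield many times
    shared = snapshot + 1;        // write section
    mutex.unlockWrite();
    return ctx->set(0);
}
```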
diff --git a/quantum/quantum_read_write_spinlock.h b/quantum/quantum_read_write_spinlock.h
index 3842f96..f7c9722 100644
--- a/quantum/quantum_read_write_spinlock.h
+++ b/quantum/quantum_read_write_spinlock.h
@@ -67,8 +67,11 @@ class ReadWriteSpinLock
     void upgradeToWrite();
 
     /// @brief Attempts to upgrade a reader lock to a writer lock. This operation never blocks.
+    /// @param pendingUpgrade Use this overload when calling this function in a loop.
+    ///        pendingUpgrade must be initialized to 'false'.
     /// @return True if the lock operation succeeded, false otherwise.
     bool tryUpgradeToWrite();
+    bool tryUpgradeToWrite(bool& pendingUpgrade);
 
     /// @bried Determines if this object is either read or write locked.
     /// @return True if locked, false otherwise.
diff --git a/quantum/quantum_spinlock_traits.h b/quantum/quantum_spinlock_traits.h
index 38c7bb2..c7e5e90 100644
--- a/quantum/quantum_spinlock_traits.h
+++ b/quantum/quantum_spinlock_traits.h
@@ -75,6 +75,11 @@ struct SpinLockTraits {
 //==============================================================================
 struct LockTraits {
+    enum class Attempt : uint8_t {
+        Once,      ///< Try to obtain the lock once and return success or failure
+        Unlimited, ///< Try until the lock is obtained
+        Reentrant  ///< Try continuously but return on each iteration without blocking
+    };
     using TryToLock = std::try_to_lock_t;
     using AdoptLock = std::adopt_lock_t;
     using DeferLock = std::defer_lock_t;
diff --git a/quantum/util/impl/quantum_sequencer_impl.h b/quantum/util/impl/quantum_sequencer_impl.h
index f9cd57c..5e8b59d 100644
--- a/quantum/util/impl/quantum_sequencer_impl.h
+++ b/quantum/util/impl/quantum_sequencer_impl.h
@@ -373,12 +373,8 @@ Sequencer::singleSequenceKeyTaskSchedule
     FUNC&& func,
     ARGS&&... args)
 {
-    // find the dependent
-    typename ContextMap::iterator contextIt = sequencer._contexts.find(sequenceKey);
-    if (contextIt == sequencer._contexts.end())
-    {
-        contextIt = sequencer._contexts.emplace(sequenceKey, SequenceKeyData()).first;
-    }
+    // find the dependent or create a new element
+    typename ContextMap::iterator contextIt = sequencer._contexts.emplace(sequenceKey, SequenceKeyData()).first;
     // update stats
     contextIt->second._stats->incrementPostedTaskCount();
     contextIt->second._stats->incrementPendingTaskCount();
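Note on the Sequencer change: map emplace() already behaves as find-or-insert — when the key exists it returns the existing element and inserts nothing — so the explicit find()/end() check was redundant. A small standalone illustration (a plain std::unordered_map standing in for ContextMap):

```cpp
#include <cassert>
#include <string>
#include <unordered_map>

int main()
{
    std::unordered_map<std::string, int> contexts;
    contexts.emplace("key", 1);

    // emplace() with an existing key does not overwrite; it returns an iterator
    // to the element already stored and reports inserted == false.
    auto [it, inserted] = contexts.emplace("key", 99);
    assert(!inserted);
    assert(it->second == 1);
    return 0;
}
```

The trade-off is that emplace() may construct its value argument (here a SequenceKeyData) even when the key is already present, which the old find-first version avoided.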
diff --git a/quantum/util/impl/quantum_spinlock_util_impl.h b/quantum/util/impl/quantum_spinlock_util_impl.h
index 048afba..c44afa1 100644
--- a/quantum/util/impl/quantum_spinlock_util_impl.h
+++ b/quantum/util/impl/quantum_spinlock_util_impl.h
@@ -55,141 +55,155 @@ void SpinLockUtil::pauseCPU()
 inline
 bool SpinLockUtil::lockWrite(std::atomic_uint32_t &flag,
-                             bool tryOnce)
+                             LockTraits::Attempt attempt)
 {
-    while (true)
-    {
+    size_t numBackoffs = 0;
 spin:
-        if (!tryOnce) {
-            spinWaitWriter(flag);
-        }
-        //Try acquiring the lock
-        uint32_t oldValue = set(0, 0);
-        uint32_t newValue = set(0, -1);
-        while (!flag.compare_exchange_weak(oldValue, newValue, std::memory_order_acquire))
+    if (attempt != LockTraits::Attempt::Once) {
+        spinWaitWriter(flag);
+    }
+    //Try acquiring the lock
+    uint32_t oldValue = set(0, 0);
+    uint32_t newValue = set(0, -1);
+    while (!flag.compare_exchange_weak(oldValue, newValue, std::memory_order_acquire))
+    {
+        //lock is already taken
+        if (owners(oldValue) != 0)
         {
-            //lock is already taken
-            if (owners(oldValue) != 0)
+            if (attempt == LockTraits::Attempt::Once)
             {
-                if (tryOnce)
-                {
-                    return false;
-                }
-                backoff();
-                //spin wait again
-                goto spin;
+                return false;
             }
-            //maintain the value for the pending writers
-            newValue = set(upgrades(oldValue), -1);
-            pauseCPU();
+            backoff(numBackoffs);
+            //spin wait again
+            goto spin;
         }
-        reset();
-        return true;
+        //maintain the value for the pending writers
+        newValue = set(upgrades(oldValue), -1);
+        pauseCPU();
     }
+    return true;
 }
 
 inline
 bool SpinLockUtil::upgradeToWrite(std::atomic_uint32_t &flag,
-                                  bool tryOnce)
+                                  LockTraits::Attempt attempt)
 {
     bool pendingUpgrade = false;
-    while (true)
-    {
+    return upgradeToWriteImpl(flag, pendingUpgrade, attempt);
+}
+
+inline
+bool SpinLockUtil::upgradeToWrite(std::atomic_uint32_t &flag,
+                                  bool& pendingUpgrade,
+                                  LockTraits::Attempt attempt)
+{
+    return upgradeToWriteImpl(flag, pendingUpgrade, attempt);
+}
+
+inline
+bool SpinLockUtil::upgradeToWriteImpl(std::atomic_uint32_t &flag,
+                                      bool& pendingUpgrade,
+                                      LockTraits::Attempt attempt)
+{
+    size_t numBackoffs = 0;
 spin:
-        if (pendingUpgrade && !tryOnce)
-        {
-            spinWaitUpgradedReader(flag);
-        }
-        //Try acquiring the lock
-        uint32_t oldValue = set(0, 1);
-        uint32_t newValue = set(0, -1);
-        while (!flag.compare_exchange_weak(oldValue, newValue, std::memory_order_acquire))
+    if (pendingUpgrade && (attempt != LockTraits::Attempt::Once))
+    {
+        spinWaitUpgradedReader(flag);
+    }
+    //Try acquiring the lock
+    uint32_t oldValue = set(0, 1);
+    uint32_t newValue = set(0, -1);
+    while (!flag.compare_exchange_weak(oldValue, newValue, std::memory_order_acquire))
+    {
+        if (!pendingUpgrade)
         {
-            if (!pendingUpgrade)
+            //We are attempting to upgrade.
+            if (owners(oldValue) > 1)
             {
-                //We are attempting to upgrade.
-                if (owners(oldValue) > 1)
+                if (attempt == LockTraits::Attempt::Once)
                 {
-                    if (tryOnce)
-                    {
-                        return false; //cannot upgrade immediately
-                    }
-                    //Increment pending upgrades and decrement readers
-                    newValue = add(oldValue, 1, -1);
-                }
-                else //owners(oldValue) == 1
-                {
-                    //One reader left so upgrade to writer directly.
-                    //There are other pending writers so we must preserve the value
-                    newValue = set(upgrades(oldValue), -1);
+                    return false; //cannot upgrade immediately
                 }
+                //Increment pending upgrades and decrement readers
+                newValue = add(oldValue, 1, -1);
             }
-            else
+            else //owners(oldValue) == 1
             {
-                //Upgrade pending. To acquire lock the low value must be 0.
-                if (owners(oldValue) != 0)
+                //One reader left so upgrade to writer directly.
+                //There are other pending writers so we must preserve the value
+                newValue = set(upgrades(oldValue), -1);
+            }
+        }
+        else
+        {
+            //Upgrade pending. To acquire lock the low value must be 0.
+            if (owners(oldValue) != 0)
+            {
+                //lock is already taken or there are still readers
+                if (attempt != LockTraits::Attempt::Unlimited)
                 {
-                    //lock is already taken or there are still readers
-                    if (tryOnce)
-                    {
-                        return false;
-                    }
-                    backoff();
-                    //spin wait until we can upgrade again
-                    goto spin;
+                    return false;
                 }
-                //We can upgrade. Decrement pending writers and upgrade to write
-                newValue = set(upgrades(oldValue)-1, -1);
+                backoff(numBackoffs);
+                //spin wait until we can upgrade again
+                goto spin;
             }
-            pauseCPU();
+            //We can upgrade. Decrement pending writers and upgrade to write
+            newValue = set(upgrades(oldValue)-1, -1);
         }
-        if (owners(oldValue) > 1)
+        pauseCPU();
+    }
+    if (owners(oldValue) > 1)
+    {
+        //We terminated the loop from H|L -> H+1|L-1 because there were multiple readers.
+        //Therefore we are still pending until all readers terminate.
+        pendingUpgrade = true;
+        if (attempt == LockTraits::Attempt::Reentrant)
         {
-            //We terminated the loop from H|L -> H+1|L-1 because there were multiple readers.
-            //Therefore we are still pending until all readers terminate.
-            pendingUpgrade = true;
-            goto spin;
+            return false; //we will get called again
         }
-        //We terminated the loop either from H|0->H-1|-1 OR H|1->H|-1 and obtained the lock
-        assert((owners(oldValue) == 0) || (owners(oldValue) == 1));
-        reset();
-        return true;
+        backoff(numBackoffs);
+        //spin wait until we can upgrade again
+        goto spin;
     }
+    //We terminated the loop either from H|0->H-1|-1 OR H|1->H|-1 and obtained the lock
+    assert((owners(oldValue) == 0) || (owners(oldValue) == 1));
+    return true;
 }
 
 inline
 bool SpinLockUtil::lockRead(std::atomic_uint32_t &flag,
-                            bool tryOnce)
+                            LockTraits::Attempt attempt)
 {
-    while (true)
-    {
+    size_t numBackoffs = 0;
 spin:
-        if (!tryOnce)
-        {
-            spinWaitReader(flag);
-        }
-        //Try acquiring the lock
-        uint32_t oldValue = set(0, 0);
-        uint32_t newValue = set(0, 1);
-        while (!flag.compare_exchange_weak(oldValue, newValue, std::memory_order_acquire))
+    if (attempt != LockTraits::Attempt::Once)
+    {
+        spinWaitReader(flag);
+    }
+    //Try acquiring the lock
+    uint32_t oldValue = set(0, 0);
+    uint32_t newValue = set(0, 1);
+    while (!flag.compare_exchange_weak(oldValue, newValue, std::memory_order_acquire))
+    {
+        if ((upgrades(oldValue) > 0) || (owners(oldValue) == -1))
        {
-            if ((upgrades(oldValue) > 0) || (owners(oldValue) == -1))
+            //lock is already taken or we have pending write upgrades
+            if (attempt == LockTraits::Attempt::Once)
             {
-                //lock is already taken or we have pending write upgrades
-                if (tryOnce)
-                {
-                    return false;
-                }
-                backoff();
-                //spin wait again
-                goto spin;
+                return false;
             }
-            newValue = add(oldValue, 0, 1);
-            pauseCPU();
+            backoff(numBackoffs);
+            //spin wait again
+            goto spin;
         }
-        //We obtained the lock so exit loop
-        reset();
-        return true;
+        newValue = add(oldValue, 0, 1);
+        pauseCPU();
     }
+    //We obtained the lock so exit loop
+    return true;
 }
 
 inline
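For readers of the H|L comments above: the lock state is a single 32-bit word whose high 16 bits count pending upgrades and whose low 16 bits count owners, with -1 in the low half meaning write-locked — exactly what the set()/add()/owners()/upgrades() helpers at the bottom of this file encode. A self-contained model of the packing (it mirrors those helpers; the transitions shown are only illustrative):

```cpp
#include <cassert>
#include <cstdint>

// Same packing as SpinLockUtil: high word = pending upgrades, low word = owners.
constexpr uint32_t mask = 0xffff;
constexpr uint32_t set(int16_t upgrades, int16_t owners) {
    return ((uint32_t)upgrades << 16) | (owners & mask);
}
constexpr int16_t owners(uint32_t v)   { return (int16_t)(v & mask); }
constexpr int16_t upgrades(uint32_t v) { return (int16_t)(v >> 16); }

int main()
{
    uint32_t state = set(0, 3);            // 0|3  : three readers
    assert(upgrades(state) == 0 && owners(state) == 3);

    state = set(1, 2);                     // 1|2  : one reader went pending (H|L -> H+1|L-1)
    assert(upgrades(state) == 1 && owners(state) == 2);

    state = set(upgrades(state) - 1, -1);  // 0|-1 : last pending upgrader takes the write lock
    assert(upgrades(state) == 0 && owners(state) == -1);
    return 0;
}
```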
@@ -233,18 +247,11 @@ void SpinLockUtil::unlockWrite(std::atomic_uint32_t &flag)
 }
 
 inline
-void SpinLockUtil::reset()
-{
-    numYields() = 0;
-    numSpins() = 0;
-}
-
-inline
-void SpinLockUtil::yieldOrSleep()
+void SpinLockUtil::yieldOrSleep(size_t& num)
 {
-    if (numYields() < SpinLockTraits::numYieldsBeforeSleep())
+    if (num < SpinLockTraits::numYieldsBeforeSleep())
     {
-        ++numYields();
+        ++num;
         std::this_thread::yield();
     }
     else
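With reset() gone, yieldOrSleep() and backoff() no longer touch thread-local counters; every wait loop owns its own state (see the local numYields added to each spinWait* below, and the numBackoffs locals in the lock functions above). A sketch of the resulting pattern — not the library code, and the yield/sleep values are made up:

```cpp
#include <atomic>
#include <chrono>
#include <cstddef>
#include <thread>

// Yield a few times, then start sleeping; the caller owns the counter.
void yieldOrSleep(size_t& numYields, size_t numYieldsBeforeSleep, std::chrono::microseconds sleepDuration)
{
    if (numYields < numYieldsBeforeSleep) {
        ++numYields;
        std::this_thread::yield();
    }
    else {
        std::this_thread::sleep_for(sleepDuration);
    }
}

void spinWait(const std::atomic_uint32_t& flag)
{
    size_t numYields = 0; // local to this wait: implicitly reset on every call,
                          // never shared with another lock on the same thread
    while (flag.load(std::memory_order_relaxed) != 0) {
        yieldOrSleep(numYields, 3, std::chrono::microseconds(200)); // illustrative values
    }
}
```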
@@ -255,54 +262,60 @@ void SpinLockUtil::yieldOrSleep()
 }
 
 inline
-void SpinLockUtil::backoff()
+size_t SpinLockUtil::generateBackoff()
 {
     using Distribution = std::uniform_int_distribution<size_t>;
     static thread_local std::mt19937_64 gen(std::random_device{}());
     static thread_local Distribution distribution;
-    if (numSpins() == 0)
+
+    assert(SpinLockTraits::minSpins() <= SpinLockTraits::maxSpins());
+    if ((SpinLockTraits::backoffPolicy() == SpinLockTraits::BackoffPolicy::EqualStep) ||
+        (SpinLockTraits::backoffPolicy() == SpinLockTraits::BackoffPolicy::Random))
     {
-        //Initialize for the first time
-        assert(SpinLockTraits::minSpins() <= SpinLockTraits::maxSpins());
-        if ((SpinLockTraits::backoffPolicy() == SpinLockTraits::BackoffPolicy::EqualStep) ||
-            (SpinLockTraits::backoffPolicy() == SpinLockTraits::BackoffPolicy::Random))
-        {
-            //Generate a number from the entire range
-            numSpins() = distribution(gen, Distribution::param_type
-                         {SpinLockTraits::minSpins(), SpinLockTraits::maxSpins()});
-        }
-        else
-        {
-            //Generate a number below min and add it to the min.
-            numSpins() = SpinLockTraits::minSpins() +
-                         distribution(gen, Distribution::param_type
-                         {0, SpinLockTraits::minSpins()});
-        }
+        //Generate a number from the entire range
+        return distribution(gen, Distribution::param_type
+               {SpinLockTraits::minSpins(), SpinLockTraits::maxSpins()});
     }
-    else if (numSpins() < SpinLockTraits::maxSpins())
+    else
+    {
+        //Generate a number below min and add it to the min.
+        return SpinLockTraits::minSpins() +
+               distribution(gen, Distribution::param_type
+               {0, SpinLockTraits::minSpins()});
+    }
+}
+
+inline
+void SpinLockUtil::backoff(size_t& num)
+{
+    if (num == 0)
+    {
+        num = generateBackoff();
+    }
+    else if (num < SpinLockTraits::maxSpins())
     {
         if (SpinLockTraits::backoffPolicy() == SpinLockTraits::BackoffPolicy::Linear)
         {
-            numSpins() += numSpins();
+            num += SpinLockTraits::minSpins();
         }
         else if (SpinLockTraits::backoffPolicy() == SpinLockTraits::BackoffPolicy::Exponential)
         {
-            numSpins() *= 2;
+            num *= 2;
        }
         else if (SpinLockTraits::backoffPolicy() == SpinLockTraits::BackoffPolicy::Random)
         {
             //Generate a new value each time
-            numSpins() = distribution(gen, Distribution::param_type
-                         {SpinLockTraits::minSpins(), SpinLockTraits::maxSpins()});
+            num = generateBackoff();
         }
         //Check that we don't exceed max spins
-        if (numSpins() > SpinLockTraits::maxSpins())
+        if (num > SpinLockTraits::maxSpins())
         {
-            numSpins() = SpinLockTraits::maxSpins();
+            //Reset back to initial value
+            num = generateBackoff();
         }
     }
     //Spin
-    for (size_t i = 0; i < numSpins(); ++i)
+    for (size_t i = 0; i < num; ++i)
     {
         pauseCPU();
     }
@@ -312,6 +325,7 @@
 inline
 void SpinLockUtil::spinWaitWriter(std::atomic_uint32_t& flag)
 {
     size_t numIters = 0;
+    size_t numYields = 0;
     while (owners(flag.load(std::memory_order_relaxed) != 0))
     {
         if (numIters < SpinLockTraits::maxSpins())
@@ -322,7 +336,7 @@ void SpinLockUtil::spinWaitWriter(std::atomic_uint32_t& flag)
         else
         {
             //Yield or sleep the thread instead of spinning
-            yieldOrSleep();
+            yieldOrSleep(numYields);
         }
     }
 }
@@ -331,6 +345,7 @@
 inline
 void SpinLockUtil::spinWaitUpgradedReader(std::atomic_uint32_t& flag)
 {
     size_t numIters = 0;
+    size_t numYields = 0;
     while (true)
     {
         uint32_t v = flag.load(std::memory_order_relaxed);
@@ -344,7 +359,7 @@ void SpinLockUtil::spinWaitUpgradedReader(std::atomic_uint32_t& flag)
             else
             {
                 //Yield or sleep the thread instead of spinning
-                yieldOrSleep();
+                yieldOrSleep(numYields);
             }
         }
         else
@@ -358,6 +373,7 @@
 inline
 void SpinLockUtil::spinWaitReader(std::atomic_uint32_t& flag)
 {
     size_t numIters = 0;
+    size_t numYields = 0;
     while (true)
     {
         uint32_t v = flag.load(std::memory_order_relaxed);
@@ -371,7 +387,7 @@ void SpinLockUtil::spinWaitReader(std::atomic_uint32_t& flag)
             else
             {
                 //Yield or sleep the thread instead of spinning
-                yieldOrSleep();
+                yieldOrSleep(numYields);
             }
         }
         else
@@ -406,20 +422,6 @@ uint16_t SpinLockUtil::numPendingWriters(const std::atomic_uint32_t &flag)
     return upgrades(flag.load(std::memory_order_acquire));
 }
 
-inline
-size_t& SpinLockUtil::numYields()
-{
-    static thread_local size_t _numYields = 0;
-    return _numYields;
-}
-
-inline
-size_t& SpinLockUtil::numSpins()
-{
-    static thread_local size_t _numSpins = 0;
-    return _numSpins;
-}
-
 inline
 constexpr uint32_t SpinLockUtil::set(int16_t upgrades, int16_t owners)
 {
     return ((uint32_t)upgrades<<16) | (owners&mask);
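backoff(size_t&) now seeds the counter from generateBackoff() on first use, grows it according to the configured policy, and re-seeds rather than clamping when the growth overshoots maxSpins(). A standalone sketch of how the counter evolves — minSpins/maxSpins and the seed are made-up stand-ins for the SpinLockTraits values:

```cpp
#include <cstddef>
#include <iostream>

int main()
{
    const size_t minSpins = 100, maxSpins = 1000;
    auto seed = [&]() { return minSpins; }; // stand-in for generateBackoff()

    // Mirrors the new backoff() control flow for the Linear and Exponential policies.
    auto step = [&](size_t& num, bool exponential) {
        if (num == 0) {
            num = seed();
        }
        else if (num < maxSpins) {
            num = exponential ? num * 2 : num + minSpins;
            if (num > maxSpins) {
                num = seed(); // reset back to an initial value rather than pinning at the max
            }
        }
        // ...the real function then spins for `num` iterations (omitted here)
    };

    size_t linear = 0, exponential = 0;
    for (int attempt = 1; attempt <= 6; ++attempt) {
        step(linear, false);
        step(exponential, true);
        std::cout << "attempt " << attempt << ": linear=" << linear
                  << " exponential=" << exponential << '\n';
    }
    return 0;
}
```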
diff --git a/quantum/util/quantum_spinlock_util.h b/quantum/util/quantum_spinlock_util.h
index c98a25c..2eb9db4 100644
--- a/quantum/util/quantum_spinlock_util.h
+++ b/quantum/util/quantum_spinlock_util.h
@@ -16,14 +16,22 @@
 #ifndef QUANTUM_QUANTUM_SPINLOCK_UTIL_H
 #define QUANTUM_QUANTUM_SPINLOCK_UTIL_H
 
+#include <quantum/quantum_spinlock_traits.h>
+
 namespace Bloomberg {
 namespace quantum {
 
 //Adapted from https://geidav.wordpress.com/tag/test-and-test-and-set/
 struct SpinLockUtil {
-    static bool lockWrite(std::atomic_uint32_t& flag, bool tryOnce = false);
-    static bool lockRead(std::atomic_uint32_t& flag, bool tryOnce = false);
-    static bool upgradeToWrite(std::atomic_uint32_t& flag, bool tryOnce = false);
+    static bool lockWrite(std::atomic_uint32_t& flag,
+                          LockTraits::Attempt);
+    static bool lockRead(std::atomic_uint32_t& flag,
+                         LockTraits::Attempt);
+    static bool upgradeToWrite(std::atomic_uint32_t& flag,
+                               LockTraits::Attempt);
+    static bool upgradeToWrite(std::atomic_uint32_t& flag,
+                               bool& pendingUpgrade,
+                               LockTraits::Attempt);
     static void unlockRead(std::atomic_uint32_t& flag);
     static void unlockWrite(std::atomic_uint32_t& flag);
     static bool isLocked(const std::atomic_uint32_t& flag);
@@ -31,14 +39,15 @@ struct SpinLockUtil {
     static uint16_t numReaders(const std::atomic_uint32_t& flag);
     static uint16_t numPendingWriters(const std::atomic_uint32_t& flag);
 private:
-    static void reset();
-    static void yieldOrSleep();
-    static void backoff();
+    static bool upgradeToWriteImpl(std::atomic_uint32_t& flag,
+                                   bool& pendingUpgrade,
+                                   LockTraits::Attempt);
+    static void yieldOrSleep(size_t& num);
+    static size_t generateBackoff();
+    static void backoff(size_t& num);
     static void spinWaitWriter(std::atomic_uint32_t& flag);
     static void spinWaitReader(std::atomic_uint32_t& flag);
     static void spinWaitUpgradedReader(std::atomic_uint32_t& flag);
-    static size_t& numYields();
-    static size_t& numSpins();
     static void pauseCPU();
     //Bit manipulations
     static constexpr uint32_t set(int16_t upgrades, int16_t owners);
diff --git a/tests/quantum_locks_tests.cpp b/tests/quantum_locks_tests.cpp
index e7796a0..cd5c7bc 100644
--- a/tests/quantum_locks_tests.cpp
+++ b/tests/quantum_locks_tests.cpp
@@ -21,6 +21,8 @@
 #include
 
 using namespace Bloomberg::quantum;
+using ms = std::chrono::milliseconds;
+using us = std::chrono::microseconds;
 
 #ifdef BOOST_USE_VALGRIND
 int spins = 100;
@@ -37,7 +39,7 @@ void runnable(SpinLock* exclusiveLock) {
         exclusiveLock->lock();
         locksTaken++;
         val++;
-        std::this_thread::sleep_for(std::chrono::microseconds(500));
+        std::this_thread::sleep_for(us(500));
         exclusiveLock->unlock();
     }
 }
@@ -58,14 +60,14 @@ void runThreads(int num)
     }
     auto end = std::chrono::high_resolution_clock::now();
     std::cout << "Total spin time " << num << ": "
-              << std::chrono::duration_cast<std::chrono::milliseconds>(end-start).count()
+              << std::chrono::duration_cast<ms>(end-start).count()
               << "ms" << std::endl;
 }
 
 void spinlockSettings(
     size_t min,
     size_t max,
-    std::chrono::microseconds sleepUs,
+    us sleepUs,
     size_t numYields,
     int num,
     int enable)
@@ -81,7 +83,7 @@ void spinlockSettings(
     }
 }
 
-TEST(Spinlock, Spinlock)
+TEST(Locks, Spinlock)
 {
     val = 0;
     SpinLock spin;
@@ -106,7 +108,7 @@
     EXPECT_EQ(0, val);
 }
 
-TEST(Spinlock, Guards)
+TEST(Locks, Spinlock_Guards)
 {
     SpinLock spin;
     { //Basic
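With every lock test now grouped under the single Locks suite, the whole set can be run or excluded with one GoogleTest filter, for example (test binary name is illustrative):

```
./quantum_locks_tests --gtest_filter='Locks.*'
```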
@@ -153,21 +155,21 @@
     EXPECT_FALSE(spin.isLocked());
 }
 
-TEST(Spinlock, HighContention)
+TEST(Locks, Spinlock_HighContention)
 {
     val = 0;
     int enable = -1;
     int i = 0;
-    spinlockSettings(500, 10000, std::chrono::microseconds(100), 2, i++, enable);  //0
-    spinlockSettings(0, 20000, std::chrono::microseconds(100), 3, i++, enable);    //1
-    spinlockSettings(100, 5000, std::chrono::microseconds(200), 3, i++, enable);   //2
-    spinlockSettings(500, 200000, std::chrono::microseconds(0), 5, i++, enable);   //3
-    spinlockSettings(500, 20000, std::chrono::microseconds(1000), 0, i++, enable); //4
-    spinlockSettings(500, 2000, std::chrono::microseconds(0), 0, i++, enable);     //5
-    spinlockSettings(0, 0, std::chrono::microseconds(10), 2000, i++, enable);      //6
+    spinlockSettings(500, 10000, us(100), 2, i++, enable);  //0
+    spinlockSettings(0, 20000, us(100), 3, i++, enable);    //1
+    spinlockSettings(100, 5000, us(200), 3, i++, enable);   //2
+    spinlockSettings(500, 200000, us(0), 5, i++, enable);   //3
+    spinlockSettings(500, 20000, us(1000), 0, i++, enable); //4
+    spinlockSettings(500, 2000, us(0), 0, i++, enable);     //5
+    spinlockSettings(0, 0, us(10), 2000, i++, enable);      //6
 }
 
-TEST(ReadWriteSpinLock, LockReadMultipleTimes)
+TEST(Locks, ReadWriteSpinLock_LockReadMultipleTimes)
 {
     ReadWriteSpinLock spin;
     EXPECT_EQ(0, spin.numReaders());
@@ -184,7 +186,7 @@ TEST(ReadWriteSpinLock, LockReadMultipleTimes)
     EXPECT_FALSE(spin.isLocked());
 }
 
-TEST(ReadWriteSpinLock, LockReadAndWrite)
+TEST(Locks, ReadWriteSpinLock_LockReadAndWrite)
 {
     int num = spins;
     int val = 0;
@@ -224,7 +226,7 @@ TEST(ReadWriteSpinLock, LockReadAndWrite)
     EXPECT_EQ(0, val);
 }
 
-TEST(ReadWriteSpinLock, LockReadAndWriteList)
+TEST(Locks, ReadWriteSpinLock_LockReadAndWriteList)
 {
     int num = spins;
     std::list<int> val;
@@ -275,7 +277,7 @@ TEST(ReadWriteSpinLock, LockReadAndWriteList)
     EXPECT_EQ(0, val.size());
 }
 
-TEST(ReadWriteSpinLock, SingleLocks)
+TEST(Locks, ReadWriteSpinLock_SingleLocks)
 {
     ReadWriteSpinLock lock;
 
@@ -303,7 +305,7 @@ TEST(ReadWriteSpinLock, SingleLocks)
     EXPECT_EQ(0, lock.numReaders());
 }
 
-TEST(ReadWriteSpinLock, UnlockingUnlockedIsNoOp)
+TEST(Locks, ReadWriteSpinLock_UnlockingUnlockedIsNoOp)
 {
     ReadWriteSpinLock lock;
 
@@ -316,7 +318,7 @@ TEST(ReadWriteSpinLock, UnlockingUnlockedIsNoOp)
     EXPECT_FALSE(lock.isLocked());
 }
 
-TEST(ReadWriteSpinLock, TryLocks)
+TEST(Locks, ReadWriteSpinLock_TryLocks)
 {
     ReadWriteSpinLock lock;
 
@@ -333,7 +335,7 @@ TEST(ReadWriteSpinLock, TryLocks)
     EXPECT_FALSE(lock.tryLockRead());
 }
 
-TEST(ReadWriteSpinLock, Guards)
+TEST(Locks, ReadWriteSpinLock_Guards)
 {
     ReadWriteSpinLock lock;
 
@@ -476,7 +478,7 @@ TEST(ReadWriteSpinLock, Guards)
     EXPECT_FALSE(lock.isLocked());
 }
 
-TEST(ReadWriteSpinLock, UpgradeLock)
+TEST(Locks, ReadWriteSpinLock_UpgradeLock)
 {
     ReadWriteSpinLock lock;
     lock.lockRead();
@@ -488,7 +490,7 @@ TEST(ReadWriteSpinLock, UpgradeLock)
     EXPECT_EQ(3, lock.numReaders());
     EXPECT_EQ(0, lock.numPendingWriters());
     std::thread t([&lock]() {
-        std::this_thread::sleep_for(std::chrono::milliseconds(100));
+        std::this_thread::sleep_for(ms(100));
         EXPECT_EQ(2, lock.numReaders());
         lock.unlockRead();
         lock.unlockRead();
@@ -504,7 +506,7 @@ TEST(ReadWriteSpinLock, UpgradeLock)
     t.join();
 }
 
-TEST(ReadWriteSpinLock, UpgradeSingleReader)
+TEST(Locks, ReadWriteSpinLock_UpgradeSingleReader)
 {
     ReadWriteSpinLock lock;
     lock.lockRead();
@@ -522,7 +524,7 @@ TEST(ReadWriteSpinLock, UpgradeSingleReader)
     EXPECT_EQ(0, lock.numReaders());
 }
 
-TEST(ReadWriteSpinLock, TryUpgradeSingleReader)
+TEST(Locks, ReadWriteSpinLock_TryUpgradeSingleReader)
 {
     ReadWriteSpinLock lock;
     lock.lockRead();
@@ -534,7 +536,7 @@ TEST(ReadWriteSpinLock, TryUpgradeSingleReader)
     EXPECT_FALSE(lock.isLocked());
 }
 
-TEST(ReadWriteSpinLock, UpgradeMultipleReaders)
+TEST(Locks, ReadWriteSpinLock_UpgradeMultipleReaders)
 {
     ReadWriteSpinLock lock;
     lock.lockRead();
@@ -547,7 +549,7 @@ TEST(ReadWriteSpinLock, UpgradeMultipleReaders)
 
     //start a bunch of parallel readers which will then upgrade to writers
     std::vector<std::thread> threads;
-    auto timeToWake = std::chrono::system_clock::now() + std::chrono::milliseconds(10);
+    auto timeToWake = std::chrono::system_clock::now() + ms(10);
     std::atomic_int count{0};
     for (int i = 0; i < 10; ++i) {
         threads.emplace_back([&lock, &timeToWake, &count]() {
@@ -555,7 +557,7 @@ TEST(ReadWriteSpinLock, UpgradeMultipleReaders)
             ++count;
             std::this_thread::sleep_until(timeToWake);
             lock.upgradeToWrite();
-            std::this_thread::sleep_for(std::chrono::milliseconds(10));
+            std::this_thread::sleep_for(ms(10));
             EXPECT_GE(lock.numPendingWriters(), 0);
             EXPECT_TRUE(lock.isWriteLocked());
             lock.unlockWrite();
@@ -563,7 +565,7 @@ TEST(ReadWriteSpinLock, UpgradeMultipleReaders)
     }
     while (count < 10) {
         //wait for threads to lock read
-        std::this_thread::sleep_for(std::chrono::milliseconds(10));
+        std::this_thread::sleep_for(ms(10));
     }
     lock.upgradeToWrite(); //this will block waiting for the other threads to finish
     EXPECT_TRUE(lock.isWriteLocked());
@@ -576,7 +578,7 @@ TEST(ReadWriteSpinLock, UpgradeMultipleReaders)
     EXPECT_FALSE(lock.isLocked());
 }
 
-TEST(ReadWriteSpinLock, UpgradingBlockedMultipleReaders)
+TEST(Locks, ReadWriteSpinLock_UpgradingBlockedMultipleReaders)
 {
     std::vector<int> values;
     ReadWriteSpinLock lock;
@@ -585,14 +587,14 @@ TEST(ReadWriteSpinLock, UpgradingBlockedMultipleReaders)
 
     //start a bunch of parallel readers which will then upgrade to writers
     std::vector<std::thread> threads;
-    auto timeToWake = std::chrono::system_clock::now() + std::chrono::milliseconds(10);
+    auto timeToWake = std::chrono::system_clock::now() + ms(10);
     std::atomic_int count{0};
     for (int i = 0; i < 10; ++i) {
         threads.emplace_back([&lock, &timeToWake, &values, &count, i]() {
             if (i == 9) {
                 while (count < 9) {
                     //make sure all other readers are blocked
-                    std::this_thread::sleep_for(std::chrono::milliseconds(10));
+                    std::this_thread::sleep_for(ms(10));
                 }
                 lock.unlockRead(); //unblock the pending writer
             }
@@ -624,7 +626,7 @@ TEST(ReadWriteSpinLock, UpgradingBlockedMultipleReaders)
 // READWRITEMUTEX TESTS
 //==============================================================================
 
-TEST(ReadWriteMutex, SingleLocks)
+TEST(Locks, ReadWriteMutex_SingleLocks)
 {
     ReadWriteMutex mutex;
 
@@ -675,7 +677,7 @@ TEST(ReadWriteMutex, SingleLocks)
     EXPECT_EQ(0, mutex.numReaders());
 }
 
-TEST(ReadWriteMutex, TryLocks)
+TEST(Locks, ReadWriteMutex_TryLocks)
 {
     ReadWriteMutex mutex;
 
@@ -703,10 +705,51 @@ TEST(ReadWriteMutex, TryLocks)
     EXPECT_TRUE(mutex.isWriteLocked());
     EXPECT_FALSE(mutex.tryLockRead());
     EXPECT_FALSE(mutex.isReadLocked());
+}
+
+TEST(Locks, ReadWriteMutex_UpgradeToWrite)
+{
+    int numThreads = 2;
+    ReadWriteMutex rwMutex;
+    int locksAcquired = -1;
+    ConditionVariable cond;
+    Mutex mutex;
+    std::atomic<int> numReadLocks{0};
+
+    auto job = [&]() mutable {
+        ReadWriteMutex::Guard rwGuard(rwMutex, lock::acquireRead);
+        numReadLocks++;
+        {
+            Mutex::Guard guard(mutex);
+            cond.wait(mutex, [&]() -> bool { return locksAcquired == 0; });
+        }
+        rwGuard.upgradeToWrite();
+        locksAcquired++;
+    };
+
+    std::vector<std::thread> threads;
+
+    for (int i = 0; i < numThreads; i++) {
+        threads.emplace_back(job);
+    }
+
+    while (numReadLocks < numThreads) {
+        std::this_thread::sleep_for(ms(100));
+    }
+    {
+        Mutex::Guard guard(mutex);
+        locksAcquired = 0;
+    }
+    cond.notifyAll();
+
+    for (auto&& t : threads) {
+        t.join();
+    }
+
+    EXPECT_EQ(numThreads, locksAcquired);
 }
 
-TEST(ReadWriteMutex, Guards)
+TEST(Locks, ReadWriteMutex_Guards)
 {
     ReadWriteMutex mutex;
 
@@ -944,7 +987,7 @@ TEST(ReadWriteMutex, Guards)
     mutex.unlockWrite();
 }
 
-TEST(ReadWriteMutex, MultipleReadLocks) {
+TEST(Locks, ReadWriteMutex_MultipleReadLocks) {
     ReadWriteMutex mutex;
 
     bool run = true;
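The new ReadWriteMutex_UpgradeToWrite test exercises the situation the pendingUpgrade plumbing exists for: several holders of the read lock all upgrade, each trading its read count for a pending-upgrade slot so the writes serialize instead of deadlocking on each other's read ownership. The same scenario at the spinlock level, as a sketch built on this PR's tryUpgradeToWrite(bool&) overload (include path assumed):

```cpp
#include <thread>
#include <quantum/quantum_read_write_spinlock.h> // assumed include path

using Bloomberg::quantum::ReadWriteSpinLock;

int main()
{
    ReadWriteSpinLock lock;
    int shared = 0;

    auto readThenUpgrade = [&]() {
        lock.lockRead();
        bool pendingUpgrade = false;   // must start out false, per the new doc comment
        while (!lock.tryUpgradeToWrite(pendingUpgrade)) {
            // Reentrant attempt: returns without blocking; a coroutine would yield here.
        }
        ++shared;                      // write section
        lock.unlockWrite();
    };

    std::thread t1(readThenUpgrade);
    std::thread t2(readThenUpgrade);
    t1.join();
    t2.join();
    return shared == 2 ? 0 : 1;
}
```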
diff --git a/tests/quantum_tests.cpp b/tests/quantum_tests.cpp
index 3bc1f13..d8e4cbd 100644
--- a/tests/quantum_tests.cpp
+++ b/tests/quantum_tests.cpp
@@ -1025,7 +1025,7 @@ TEST_P(PromiseTest, FutureTimeout)
     //check elapsed time
     size_t elapsed = std::chrono::duration_cast<std::chrono::milliseconds>(end-start).count();
     EXPECT_LT(elapsed, (size_t)300);
-    EXPECT_EQ(status, std::future_status::timeout);
+    EXPECT_EQ((int)status, (int)std::future_status::timeout);
 }
 
 TEST_P(PromiseTest, FutureWithoutTimeout)
@@ -1044,7 +1044,7 @@ TEST_P(PromiseTest, FutureWithoutTimeout)
     size_t elapsed = std::chrono::duration_cast<std::chrono::milliseconds>(end-start).count();
     EXPECT_GE(elapsed, (size_t)100);
     EXPECT_LT(elapsed, (size_t)300);
-    EXPECT_EQ(status, std::future_status::ready);
+    EXPECT_EQ((int)status, (int)std::future_status::ready);
 }
 
 TEST_P(PromiseTest, WaitForAllFutures)
@@ -1489,7 +1489,7 @@ TEST_P(FutureJoinerTest, JoinCoroFutures)
     EXPECT_EQ(output, std::vector<int>({0,1,2,3,4,5,6,7,8,9}));
 }
 
-TEST(SharedQueueTest, PerformanceTest1)
+TEST(SharedQueueTest, PerformanceTest)
 {
     // The code below enqueues 30 short tasks, then 1 large task, and then 30 short tasks.
     // The intuition is that in the shared-coro mode, while one thread is busy with the large task,