From ca23ea6cca730e080d427bce44421399de4c4994 Mon Sep 17 00:00:00 2001 From: Bernhard Kragl Date: Tue, 24 May 2022 12:42:34 +0200 Subject: [PATCH] Implement `Mutex::try_lock` Without `try_lock` it was easier to justify context switches, because acquire was a right mover (we only needed a context switch before) and release was a left mover (we only needed a context switch after). However, with `try_lock` that is not the case anymore. This commit argues why we need a context switch at the end of `lock` and `try_lock` (both in the success and failure case), and why we do not need a context switch at the beginning of `try_lock` and `MutexGuard::drop`. --- src/sync/mutex.rs | 96 +++++++++++++-- tests/basic/condvar.rs | 155 +++++++++++++----------- tests/basic/dfs.rs | 113 ++++++++++++----- tests/basic/mutex.rs | 229 ++++++++++++++++++++++++++++++++++- tests/basic/replay.rs | 2 +- tests/demo/bounded_buffer.rs | 2 +- 6 files changed, 485 insertions(+), 112 deletions(-) diff --git a/src/sync/mutex.rs b/src/sync/mutex.rs index 4b6119f7..61154ccc 100644 --- a/src/sync/mutex.rs +++ b/src/sync/mutex.rs @@ -61,13 +61,18 @@ impl Mutex { panic!("deadlock! task {:?} tried to acquire a Mutex it already holds", me); } ExecutionState::with(|s| s.current_mut().block()); + drop(state); + + // Note that we only need a context switch when we are blocked, but not if the lock is + // available. Consider that there is another thread `t` that also wants to acquire the + // lock. At the last context switch (where we were chosen), `t` must have been already + // runnable and could have been chosen by the scheduler instead. Also, if we want to + // re-acquire the lock immediately after having released it, we know that the release + // had a context switch that allowed other threads to acquire in between. 
+ thread::switch(); + state = self.state.borrow_mut(); } - drop(state); - - // Acquiring a lock is a yield point - thread::switch(); - let mut state = self.state.borrow_mut(); // Once the scheduler has resumed this thread, we are clear to become its holder. assert!(state.waiters.remove(me)); assert!(state.holder.is_none()); @@ -84,7 +89,7 @@ impl Mutex { drop(state); // Grab a `MutexGuard` from the inner lock, which we must be able to acquire here - match self.inner.try_lock() { + let result = match self.inner.try_lock() { Ok(guard) => Ok(MutexGuard { inner: Some(guard), mutex: self, @@ -94,7 +99,15 @@ impl Mutex { mutex: self, })), Err(TryLockError::WouldBlock) => panic!("mutex state out of sync"), - } + }; + + // We need to let other threads in here so they may fail a `try_lock`. This is the case + // because the current thread holding the lock might not have any further context switches + // until after releasing the lock. The `concurrent_lock_try_lock` test illustrates this + // scenario and would fail if this context switch is not here. + thread::switch(); + + result } /// Attempts to acquire this lock. @@ -102,7 +115,68 @@ impl Mutex { /// If the lock could not be acquired at this time, then Err is returned. This function does not /// block. pub fn try_lock(&self) -> TryLockResult> { - unimplemented!() + let me = ExecutionState::me(); + + let mut state = self.state.borrow_mut(); + trace!(holder=?state.holder, waiters=?state.waiters, "trying to acquire mutex {:p}", self); + + // We don't need a context switch here. There are two cases to analyze. + // * Consider that `state.holder == None` so that we manage to acquire the lock, but that + // there is another thread `t` that also wants to acquire. At the last context switch + // (where we were chosen), `t` must have been already runnable and could have been chosen + // by the scheduler instead. Then `t`'s acquire has a context switch that allows us to + // run into the `WouldBlock` case. 
+ // * Consider that `state.holder == Some(t)` so that we run into the `WouldBlock` case, + // but that `t` wants to release. At the last context switch (where we were chosen), `t` + // must have been already runnable and could have been chosen by the scheduler instead. + // Then `t`'s release has a context switch that allows us to acquire the lock. + + let result = if let Some(holder) = state.holder { + trace!("`try_lock` failed to acquire mutex {:p} held by {:?}", self, holder); + + // Update the vector clock stored in the Mutex, because future threads that manage to + // acquire the lock have a causal dependency on this failed `try_lock`. + ExecutionState::with(|s| state.clock.update(s.get_clock(me))); + + Err(TryLockError::WouldBlock) + } else { + state.holder = Some(me); + + trace!("acquired mutex {:p}", self); + + // Re-block all other waiting threads, since we won the race to take this lock + for tid in state.waiters.iter() { + ExecutionState::with(|s| s.get_mut(tid).block()); + } + + // Grab a `MutexGuard` from the inner lock, which we must be able to acquire here + match self.inner.try_lock() { + Ok(guard) => Ok(MutexGuard { + inner: Some(guard), + mutex: self, + }), + Err(TryLockError::Poisoned(guard)) => Err(TryLockError::Poisoned(PoisonError::new(MutexGuard { + inner: Some(guard.into_inner()), + mutex: self, + }))), + Err(TryLockError::WouldBlock) => panic!("mutex state out of sync"), + } + }; + + // Update this thread's clock with the clock stored in the Mutex. + // We need to do the vector clock update even in the failing case, because there's a causal + // dependency: if the `try_lock` fails, the current thread `t1` knows that the thread `t2` + // that owns the lock is in its critical section, and therefore `t1` has a causal dependency + // on everything that happened before in `t2` (which is recorded in the Mutex's clock). 
+ ExecutionState::with(|s| s.update_clock(&state.clock)); + drop(state); + + // We need to let other threads in here so they + // (a) may fail a `try_lock` (in case we acquired), or + // (b) may release the lock (in case we failed to acquire) so we can succeed in a subsequent `try_lock`. + thread::switch(); + + result } /// Consumes this mutex, returning the underlying data. @@ -153,6 +227,12 @@ impl<'a, T: ?Sized> MutexGuard<'a, T> { impl<'a, T: ?Sized> Drop for MutexGuard<'a, T> { fn drop(&mut self) { + // We don't need a context switch here *before* releasing the lock. There are two cases to analyze. + // * Other threads that want to `lock` are still blocked at this point. + // * Other threads that want to `try_lock` and would fail at this point (but not after we release) + // were already runnable at the last context switch (which could have been right after we acquired) + // and could have been scheduled then to fail the `try_lock`. + self.inner = None; let mut state = self.mutex.state.borrow_mut(); diff --git a/tests/basic/condvar.rs b/tests/basic/condvar.rs index d055ce3f..5dd35799 100644 --- a/tests/basic/condvar.rs +++ b/tests/basic/condvar.rs @@ -232,49 +232,57 @@ fn notify_one_order() { /// From "Operating Systems: Three Easy Pieces", Figure 30.8. /// Demonstrates why a waiter needs to check the condition in a `while` loop, not an if. 
/// http://pages.cs.wisc.edu/~remzi/OSTEP/threads-cv.pdf -#[test] -#[should_panic(expected = "nothing to get")] fn producer_consumer_broken1() { - replay( - || { - let lock = Arc::new(Mutex::new(())); - let cond = Arc::new(Condvar::new()); - let count = Arc::new(AtomicUsize::new(0)); - - // Two consumers - for _ in 0..2 { - let lock = Arc::clone(&lock); - let cond = Arc::clone(&cond); - let count = Arc::clone(&count); - thread::spawn(move || { - for _ in 0..2 { - let mut guard = lock.lock().unwrap(); - if count.load(Ordering::SeqCst) == 0 { - guard = cond.wait(guard).unwrap(); - } - // get() - assert_eq!(count.load(Ordering::SeqCst), 1, "nothing to get"); - count.store(0, Ordering::SeqCst); - cond.notify_one(); - drop(guard); // explicit unlock to match the book - } - }); - } + let lock = Arc::new(Mutex::new(())); + let cond = Arc::new(Condvar::new()); + let count = Arc::new(AtomicUsize::new(0)); - // One producer + // Two consumers + for _ in 0..2 { + let lock = Arc::clone(&lock); + let cond = Arc::clone(&cond); + let count = Arc::clone(&count); + thread::spawn(move || { for _ in 0..2 { let mut guard = lock.lock().unwrap(); - if count.load(Ordering::SeqCst) == 1 { + if count.load(Ordering::SeqCst) == 0 { guard = cond.wait(guard).unwrap(); } - // put() - assert_eq!(count.load(Ordering::SeqCst), 0, "no space to put"); - count.store(1, Ordering::SeqCst); + // get() + assert_eq!(count.load(Ordering::SeqCst), 1, "nothing to get"); + count.store(0, Ordering::SeqCst); cond.notify_one(); - drop(guard); + drop(guard); // explicit unlock to match the book } - }, - "910215000000408224002229", + }); + } + + // One producer + for _ in 0..2 { + let mut guard = lock.lock().unwrap(); + if count.load(Ordering::SeqCst) == 1 { + guard = cond.wait(guard).unwrap(); + } + // put() + assert_eq!(count.load(Ordering::SeqCst), 0, "no space to put"); + count.store(1, Ordering::SeqCst); + cond.notify_one(); + drop(guard); + } +} + +#[test] +#[should_panic] +fn 
check_producer_consumer_broken1() { + check_random(producer_consumer_broken1, 5000) +} + +#[test] +#[should_panic(expected = "nothing to get")] +fn replay_producer_consumer_broken1() { + replay( + producer_consumer_broken1, + "910219ccf2ead7a59dee9e4590000282249100208904", + ) } @@ -282,50 +290,55 @@ fn producer_consumer_broken1() { /// with a while loop, not an if. /// Demonstrates why one condvar is not sufficient for a concurrent queue. /// http://pages.cs.wisc.edu/~remzi/OSTEP/threads-cv.pdf -#[test] -#[should_panic(expected = "deadlock")] fn producer_consumer_broken2() { - replay( - || { - let lock = Arc::new(Mutex::new(())); - let cond = Arc::new(Condvar::new()); - let count = Arc::new(AtomicUsize::new(0)); - - // Two consumers - for _ in 0..2 { - let lock = Arc::clone(&lock); - let cond = Arc::clone(&cond); - let count = Arc::clone(&count); - thread::spawn(move || { - for _ in 0..1 { - let mut guard = lock.lock().unwrap(); - while count.load(Ordering::SeqCst) == 0 { - guard = cond.wait(guard).unwrap(); - } - // get() - assert_eq!(count.load(Ordering::SeqCst), 1, "nothing to get"); - count.store(0, Ordering::SeqCst); - cond.notify_one(); - drop(guard); - } - }); - } + let lock = Arc::new(Mutex::new(())); + let cond = Arc::new(Condvar::new()); + let count = Arc::new(AtomicUsize::new(0)); - // One producer - for _ in 0..2 { + // Two consumers + for _ in 0..2 { + let lock = Arc::clone(&lock); + let cond = Arc::clone(&cond); + let count = Arc::clone(&count); + thread::spawn(move || { + for _ in 0..1 { let mut guard = lock.lock().unwrap(); - while count.load(Ordering::SeqCst) == 1 { + while count.load(Ordering::SeqCst) == 0 { guard = cond.wait(guard).unwrap(); } - // put() - assert_eq!(count.load(Ordering::SeqCst), 0, "no space to put"); - count.store(1, Ordering::SeqCst); + // get() + assert_eq!(count.load(Ordering::SeqCst), 1, "nothing to get"); + count.store(0, Ordering::SeqCst); cond.notify_one(); drop(guard); } - }, - "9102110090401004405202", - ) + }); + } + + 
// One producer + for _ in 0..2 { + let mut guard = lock.lock().unwrap(); + while count.load(Ordering::SeqCst) == 1 { + guard = cond.wait(guard).unwrap(); + } + // put() + assert_eq!(count.load(Ordering::SeqCst), 0, "no space to put"); + count.store(1, Ordering::SeqCst); + cond.notify_one(); + drop(guard); + } +} + +#[test] +#[should_panic] +fn check_producer_consumer_broken2() { + check_random(producer_consumer_broken2, 5000) +} + +#[test] +#[should_panic(expected = "deadlock")] +fn replay_producer_consumer_broken2() { + replay(producer_consumer_broken2, "91021499a0ee829bee85922b104410200052a404") } /// From "Operating Systems: Three Easy Pieces", Figure 30.12. Like `producer_consumer_broken2`, but diff --git a/tests/basic/dfs.rs b/tests/basic/dfs.rs index 81b4997d..0af12ab0 100644 --- a/tests/basic/dfs.rs +++ b/tests/basic/dfs.rs @@ -48,6 +48,84 @@ fn trivial_two_threads() { assert_eq!(iterations.load(Ordering::SeqCst), 2); } +/// We have two threads T0 and T1 with the following lifecycle (with letter denoting each step): +/// * T0: spawns T1 (S), acquires lock (A), releases lock (R), finishes (F) +/// * T1: acquires lock (a), releases lock (r), finishes (f) +/// +/// Additionally, T0 and T1 may block before acquiring the lock, which we denote by B and b, +/// respectively. +/// +/// We have a total of 16 interleavings: 8 where the critical sections do not overlap (4 where +/// T0 goes first and 4 symmetric ones where T1 goes first), and 8 where the critical sections +/// overlap (4 where T0 goes first and T1 blocks, and 4 symmetric ones where T1 goes first and +/// T0 blocks). The following computation tree illustrates all interleavings. 
+/// +/// ``` +/// digraph G { +/// node[shape=point]; +/// "" -> "S" [label="S"]; +/// "S" -> "SA" [label="A"]; +/// "SA" -> "SAR" [label="R"]; +/// "SAR" -> "SARF" [label="F"]; +/// "SARF" -> "SARFa" [label="a"]; +/// "SARFa" -> "SARFar" [label="r"]; +/// "SARFar" -> "SARFarf" [label="f"]; +/// "SAR" -> "SARa" [label="a"]; +/// "SARa" -> "SARaF" [label="F"]; +/// "SARaF" -> "SARaFr" [label="r"]; +/// "SARaFr" -> "SARaFrf" [label="f"]; +/// "SARa" -> "SARar" [label="r"]; +/// "SARar" -> "SARarF" [label="F"]; +/// "SARarF" -> "SARarFf" [label="f"]; +/// "SARar" -> "SARarf" [label="f"]; +/// "SARarf" -> "SARarfF" [label="F"]; +/// "SA" -> "SAb" [label="b"]; +/// "SAb" -> "SAbR" [label="R"]; +/// "SAbR" -> "SAbRF" [label="F"]; +/// "SAbRF" -> "SAbRFa" [label="a"]; +/// "SAbRFa" -> "SAbRFar" [label="r"]; +/// "SAbRFar" -> "SAbRFarf" [label="f"]; +/// "SAbR" -> "SAbRa" [label="a"]; +/// "SAbRa" -> "SAbRaF" [label="F"]; +/// "SAbRaF" -> "SAbRaFr" [label="r"]; +/// "SAbRaFr" -> "SAbRaFrf" [label="f"]; +/// "SAbRa" -> "SAbRar" [label="r"]; +/// "SAbRar" -> "SAbRarF" [label="F"]; +/// "SAbRarF" -> "SAbRarFf" [label="f"]; +/// "SAbRar" -> "SAbRarf" [label="f"]; +/// "SAbRarf" -> "SAbRarfF" [label="F"]; +/// "S" -> "Sa" [label="a"]; +/// "Sa" -> "SaB" [label="B"]; +/// "SaB" -> "SaBr" [label="r"]; +/// "SaBr" -> "SaBrA" [label="A"]; +/// "SaBrA" -> "SaBrAR" [label="R"]; +/// "SaBrAR" -> "SaBrARF" [label="F"]; +/// "SaBrARF" -> "SaBrARFf" [label="f"]; +/// "SaBrAR" -> "SaBrARf" [label="f"]; +/// "SaBrARf" -> "SaBrARfF" [label="F"]; +/// "SaBrA" -> "SaBrAf" [label="f"]; +/// "SaBrAf" -> "SaBrAfR" [label="R"]; +/// "SaBrAfR" -> "SaBrAfRF" [label="F"]; +/// "SaBr" -> "SaBrf" [label="f"]; +/// "SaBrf" -> "SaBrfA" [label="A"]; +/// "SaBrfA" -> "SaBrfAR" [label="R"]; +/// "SaBrfAR" -> "SaBrfARF" [label="F"]; +/// "Sa" -> "Sar" [label="r"]; +/// "Sar" -> "SarA" [label="A"]; +/// "SarA" -> "SarAR" [label="R"]; +/// "SarAR" -> "SarARF" [label="F"]; +/// "SarARF" -> "SarARFf" 
[label="f"]; +/// "SarAR" -> "SarARf" [label="f"]; +/// "SarARf" -> "SarARfF" [label="F"]; +/// "SarA" -> "SarAf" [label="f"]; +/// "SarAf" -> "SarAfR" [label="R"]; +/// "SarAfR" -> "SarAfRF" [label="F"]; +/// "Sar" -> "Sarf" [label="f"]; +/// "Sarf" -> "SarfA" [label="A"]; +/// "SarfA" -> "SarfAR" [label="R"]; +/// "SarfAR" -> "SarfARF" [label="F"]; +/// } +/// ``` fn two_threads_work(counter: &Arc) { counter.fetch_add(1, Ordering::SeqCst); @@ -76,9 +154,8 @@ fn two_threads() { runner.run(move || two_threads_work(&counter)); } - // 2 threads, 3 operations each (thread start, lock acq, lock rel) - // 2*3 choose 3 = 20 - assert_eq!(iterations.load(Ordering::SeqCst), 20); + // See `two_threads_work` for an illustration of all 16 interleavings. + assert_eq!(iterations.load(Ordering::SeqCst), 16); } #[test] @@ -92,18 +169,8 @@ fn two_threads_depth_4() { runner.run(move || two_threads_work(&counter)); } - // We have two threads T0 and T1 with the following lifecycle (with letter denoting each step): - // T0: spawns T1 (S), waits for lock (W), acquires + releases lock (L), finishes (F) - // T1: waits for lock (w), acquires + releases lock (l), finishes (f) - // - // We have the following constraints: - // operations in each thread are done in order - // S happens before w - // - // The set of valid interleavings of depth 4 is therefore: - // { SWLF, SWLw, SWwL, SWwl, SwWL, SwWl, SwlW, Swlf } - // for a total of 8 interleavings - assert_eq!(iterations.load(Ordering::SeqCst), 8); + // See `two_threads_work` for an illustration of all 6 interleavings up to depth 4. 
+ assert_eq!(iterations.load(Ordering::SeqCst), 6); } #[test] @@ -117,20 +184,8 @@ fn two_threads_depth_5() { runner.run(move || two_threads_work(&counter)); } - // We have two threads T0 and T1 with the following lifecycle (with letter denoting each step): - // T0: spawns T1 (S), waits for lock (W), acquires + releases lock (L), finishes (F) - // T1: waits for lock (w), acquires + releases lock (l), finishes (f) - // - // We have the following constraints: - // operations in each thread are done in order - // S happens before w - // - // The set of valid interleavings of depth 5 is therefore: - // { SWLFw, SWLwF, SWwLF, SwWLF, // 4 ops by T0, 1 op by T1 - // SWLwl, SWwLl, SWwlL, SwWLl, SwWlL, SwlWL, // 3 ops by T0, 2 ops by T1 - // SWwlf, SwWlf, SwlWf, SwlfW } // 2 ops by T0, 3 ops by T1 - // for a total of 14 interleavings - assert_eq!(iterations.load(Ordering::SeqCst), 14); + // See `two_threads_work` for an illustration of all 10 interleavings up to depth 5. + assert_eq!(iterations.load(Ordering::SeqCst), 10); } #[test] diff --git a/tests/basic/mutex.rs b/tests/basic/mutex.rs index 5035de54..c4116897 100644 --- a/tests/basic/mutex.rs +++ b/tests/basic/mutex.rs @@ -1,7 +1,8 @@ use shuttle::scheduler::PctScheduler; use shuttle::sync::Mutex; -use shuttle::{check, check_random, thread, Runner}; -use std::sync::Arc; +use shuttle::{check, check_dfs, check_random, thread, Runner}; +use std::collections::HashSet; +use std::sync::{Arc, TryLockError}; use test_log::test; #[test] @@ -114,6 +115,230 @@ fn concurrent_increment() { }); } +/// Two concurrent threads, one doing two successive atomic increments, the other doing +/// an atomic multiplication by 2. The multiplication can go before, between, and after +/// the increments. Thus we expect to see final values +/// * 2 (`0 * 2 + 1 + 1`), +/// * 3 (`(0 + 1) * 2 + 1`), and +/// * 4 (`(0 + 1 + 1) * 2`). 
+#[test] +fn unlock_yields() { + let observed_values = Arc::new(std::sync::Mutex::new(HashSet::new())); + let observed_values_clone = Arc::clone(&observed_values); + + check_dfs( + move || { + let lock = Arc::new(Mutex::new(0usize)); + + let add_thread = { + let lock = Arc::clone(&lock); + thread::spawn(move || { + *lock.lock().unwrap() += 1; + *lock.lock().unwrap() += 1; + }) + }; + let mul_thread = { + let lock = Arc::clone(&lock); + thread::spawn(move || { + *lock.lock().unwrap() *= 2; + }) + }; + + add_thread.join().unwrap(); + mul_thread.join().unwrap(); + + let value = *lock.try_lock().unwrap(); + observed_values_clone.lock().unwrap().insert(value); + }, + None, + ); + + let observed_values = Arc::try_unwrap(observed_values).unwrap().into_inner().unwrap(); + assert_eq!(observed_values.len(), 3); + assert!(observed_values.contains(&2)); + assert!(observed_values.contains(&3)); + assert!(observed_values.contains(&4)); +} + +/// Similar to `unlock_yields`, but throwing in an `RwLock` in one thread. +/// +/// Note that the two threads could really run concurrently while one is holding the `Mutex` +/// and the other is holding the `RwLock`. Shuttle won't explore such an interleaving here +/// because observing it would require synchronization between the two threads, and thus +/// depend on the yields of the synchronization primitive used for that. Since this has +/// nothing to do with the `Mutex` and `RwLock`, it does not seem useful to test. 
+#[test] +fn unlock_yields_mutex_rwlock_mix() { + let observed_values = Arc::new(std::sync::Mutex::new(HashSet::new())); + let observed_values_clone = Arc::clone(&observed_values); + + check_dfs( + move || { + let lock = Arc::new(Mutex::new(())); + let rwlock = Arc::new(shuttle::sync::RwLock::new(())); + let value = Arc::new(std::sync::Mutex::new(0usize)); + + let add_thread = { + let lock = Arc::clone(&lock); + let value = Arc::clone(&value); + thread::spawn(move || { + { + let _guard = lock.lock().unwrap(); + *value.lock().unwrap() += 1; + } + { + let _guard = rwlock.write().unwrap(); + *value.lock().unwrap() += 2; + } + { + let _guard = lock.lock().unwrap(); + *value.lock().unwrap() += 3; + } + }) + }; + let mul_thread = { + let lock = Arc::clone(&lock); + let log = Arc::clone(&value); + thread::spawn(move || { + let _guard = lock.lock().unwrap(); + *log.lock().unwrap() *= 2; + }) + }; + + add_thread.join().unwrap(); + mul_thread.join().unwrap(); + + let value = Arc::try_unwrap(value).unwrap().into_inner().unwrap(); + observed_values_clone.lock().unwrap().insert(value); + }, + None, + ); + + let observed_values = Arc::try_unwrap(observed_values).unwrap().into_inner().unwrap(); + assert_eq!(observed_values.len(), 4); + assert!(observed_values.contains(&6)); + assert!(observed_values.contains(&7)); + assert!(observed_values.contains(&9)); + assert!(observed_values.contains(&12)); +} + +/// Two concurrent threads trying to do an atomic increment using `try_lock`. +/// One `try_lock` must succeed, while the other may or may not succeed. +/// Thus we expect to see final values 1 and 2. 
+#[test] +fn concurrent_try_increment() { + let observed_values = Arc::new(std::sync::Mutex::new(HashSet::new())); + let observed_values_clone = Arc::clone(&observed_values); + + check_dfs( + move || { + let lock = Arc::new(Mutex::new(0usize)); + + let threads = (0..2) + .map(|_| { + let lock = Arc::clone(&lock); + thread::spawn(move || { + match lock.try_lock() { + Ok(mut guard) => { + *guard += 1; + } + Err(TryLockError::WouldBlock) => (), + Err(_) => panic!("unexpected TryLockError"), + }; + }) + }) + .collect::>(); + + for thd in threads { + thd.join().unwrap(); + } + + let value = *lock.try_lock().unwrap(); + observed_values_clone.lock().unwrap().insert(value); + }, + None, + ); + + let observed_values = Arc::try_unwrap(observed_values).unwrap().into_inner().unwrap(); + assert_eq!(observed_values.len(), 2); + assert!(observed_values.contains(&1)); + assert!(observed_values.contains(&2)); +} + +/// Two concurrent threads, one doing an atomic increment by 1 using `lock`, +/// the other trying to do an atomic increment by 1 followed by trying to do +/// an atomic increment by 2 using `try_lock`. The `lock` must succeed, while +/// both `try_lock`s may or may not succeed. Thus we expect to see final values +/// * 1 (both `try_lock`s fail), +/// * 2 (second `try_lock` fails), +/// * 3 (first `try_lock` fails), and +/// * 4 (both `try_lock`s succeed). 
+#[test] +fn concurrent_lock_try_lock() { + let observed_values = Arc::new(std::sync::Mutex::new(HashSet::new())); + let observed_values_clone = Arc::clone(&observed_values); + + check_dfs( + move || { + let lock = Arc::new(Mutex::new(0usize)); + + let lock_thread = { + let lock = Arc::clone(&lock); + thread::spawn(move || { + *lock.lock().unwrap() += 1; + }) + }; + let try_lock_thread = { + let lock = Arc::clone(&lock); + thread::spawn(move || { + for n in 1..3 { + match lock.try_lock() { + Ok(mut guard) => { + *guard += n; + } + Err(TryLockError::WouldBlock) => (), + Err(_) => panic!("unexpected TryLockError"), + }; + } + }) + }; + + lock_thread.join().unwrap(); + try_lock_thread.join().unwrap(); + + let value = *lock.try_lock().unwrap(); + observed_values_clone.lock().unwrap().insert(value); + }, + None, + ); + + let observed_values = Arc::try_unwrap(observed_values).unwrap().into_inner().unwrap(); + assert_eq!(observed_values.len(), 4); + assert!(observed_values.contains(&1)); + assert!(observed_values.contains(&2)); + assert!(observed_values.contains(&3)); + assert!(observed_values.contains(&4)); +} + +#[test] +#[should_panic(expected = "tried to acquire a Mutex it already holds")] +fn double_lock() { + check(|| { + let mutex = Mutex::new(()); + let _guard_1 = mutex.lock().unwrap(); + let _guard_2 = mutex.lock(); + }) +} + +#[test] +fn double_try_lock() { + check(|| { + let mutex = Mutex::new(()); + let _guard_1 = mutex.try_lock().unwrap(); + assert!(matches!(mutex.try_lock(), Err(TryLockError::WouldBlock))); + }) +} + // Check that we can safely execute the Drop handler of a Mutex without double-panicking #[test] #[should_panic(expected = "expected panic")] diff --git a/tests/basic/replay.rs b/tests/basic/replay.rs index db641d82..74c28c7b 100644 --- a/tests/basic/replay.rs +++ b/tests/basic/replay.rs @@ -100,7 +100,7 @@ fn deadlock_3() { #[should_panic(expected = "deadlock")] fn replay_deadlock3_block() { // Reproduce deadlock - let schedule = 
Schedule::new_from_task_ids(0, vec![0, 0, 1, 2, 1, 2, 0, 0]); + let schedule = Schedule::new_from_task_ids(0, vec![0, 0, 1, 2, 0, 0, 1, 2]); let scheduler = ReplayScheduler::new_from_schedule(schedule); let runner = Runner::new(scheduler, Default::default()); runner.run(deadlock_3); diff --git a/tests/demo/bounded_buffer.rs b/tests/demo/bounded_buffer.rs index 9f219a5e..a742090e 100644 --- a/tests/demo/bounded_buffer.rs +++ b/tests/demo/bounded_buffer.rs @@ -227,5 +227,5 @@ fn test_bounded_buffer_minimal_deadlock() { #[test] #[should_panic(expected = "deadlock")] fn test_bounded_buffer_minimal_deadlock_replay() { - replay(bounded_buffer_minimal, "91022600006c50a6699b246d92166d5ba22801") + replay(bounded_buffer_minimal, "910219e6c5a886c7a1f2d29e01106050a42ddb12455102") }