From f9f97d3d97613dee02bbd1dfef4cb48dc24f13a2 Mon Sep 17 00:00:00 2001
From: James Bornholt
Date: Tue, 14 Jun 2022 22:34:53 -0500
Subject: [PATCH] Implement `RwLock::{try_read, try_write}`

This is a little tricky because `std` is unclear about whether a thread can
acquire the same read lock multiple times. For `read` it says:

> This function might panic when called if the lock is already held by
> the current thread.

So acquiring a second read lock _might_ fail. But for `try_read` it says:

> This function will return the WouldBlock error if the RwLock could not
> be acquired because it was already locked exclusively.

suggesting that `try_read` _must_ succeed the second time (the lock is not
held exclusively). We resolve this ambiguity by choosing a conservative
semantics that always forbids a thread from acquiring the read lock twice.
This helps us catch deadlocks, especially in async programs where a task
might nondeterministically migrate between threads and only deadlock if that
migration didn't happen.

Another difficulty with this change is that causality is pretty hairy for the
read side of a `RwLock`. In principle, concurrent readers shouldn't inherit
each other's causality, as they don't affect whether the lock was readable or
not. But that's really hard to implement, especially with `try_write` in the
picture too. So again we choose a conservative implementation for our vector
clocks that just always inherits causality from all prior lock holders. This
is likely too strong, so we may explore some redundant interleavings, but I
can't convince myself of the correctness of a weaker implementation.
---
 src/runtime/task/mod.rs            |  10 +-
 src/sync/rwlock.rs                 | 154 +++++++++++--
 tests/basic/clocks.rs              |  63 +++---
 tests/basic/rwlock.rs              | 346 ++++++++++++++++++++++++++++-
 tests/demo/async_match_deadlock.rs |   2 +-
 5 files changed, 525 insertions(+), 50 deletions(-)

diff --git a/src/runtime/task/mod.rs b/src/runtime/task/mod.rs
index 5c43c0e1..3bedbdf2 100644
--- a/src/runtime/task/mod.rs
+++ b/src/runtime/task/mod.rs
@@ -299,14 +299,20 @@ impl TaskSet {
         self.tasks.iter().all(|b| !*b)
     }
 
-    pub fn insert(&mut self, tid: TaskId) {
+    /// Add a task to the set. If the set did not have this value present, `true` is returned. If
+    /// the set did have this value present, `false` is returned.
+    pub fn insert(&mut self, tid: TaskId) -> bool {
         if tid.0 >= self.tasks.len() {
             self.tasks.resize(1 + tid.0, false);
         }
-        *self.tasks.get_mut(tid.0).unwrap() = true;
+        !std::mem::replace(&mut *self.tasks.get_mut(tid.0).unwrap(), true)
     }
 
+    /// Removes a value from the set. Returns whether the value was present in the set.
     pub fn remove(&mut self, tid: TaskId) -> bool {
+        if tid.0 >= self.tasks.len() {
+            return false;
+        }
         std::mem::replace(&mut self.tasks.get_mut(tid.0).unwrap(), false)
     }
 
diff --git a/src/sync/rwlock.rs b/src/sync/rwlock.rs
index 248f585c..d3bf11ae 100644
--- a/src/sync/rwlock.rs
+++ b/src/sync/rwlock.rs
@@ -11,6 +11,10 @@ use std::sync::{LockResult, PoisonError, TryLockError, TryLockResult};
 use tracing::trace;
 /// A reader-writer lock, the same as [`std::sync::RwLock`].
+///
+/// Unlike [`std::sync::RwLock`], the same thread is never allowed to acquire the read side of a
+/// `RwLock` more than once. The `std` version is ambiguous about what behavior is allowed here, so
+/// we choose the most conservative one.
 pub struct RwLock<T> {
     state: Rc<RefCell<RwLockState>>,
     inner: std::sync::RwLock<T>,
 }
@@ -99,8 +103,27 @@ impl<T> RwLock<T> {
     ///
     /// If the access could not be granted at this time, then Err is returned. This function does
     /// not block.
+    ///
+    /// Note that unlike [`std::sync::RwLock::try_read`], if the current thread already holds this
+    /// read lock, `try_read` will return Err.
     pub fn try_read(&self) -> TryLockResult<RwLockReadGuard<'_, T>> {
-        unimplemented!()
+        if self.try_lock(RwLockType::Read) {
+            match self.inner.try_read() {
+                Ok(guard) => Ok(RwLockReadGuard {
+                    inner: Some(guard),
+                    state: Rc::clone(&self.state),
+                    me: ExecutionState::me(),
+                }),
+                Err(TryLockError::Poisoned(err)) => Err(TryLockError::Poisoned(PoisonError::new(RwLockReadGuard {
+                    inner: Some(err.into_inner()),
+                    state: Rc::clone(&self.state),
+                    me: ExecutionState::me(),
+                }))),
+                Err(TryLockError::WouldBlock) => panic!("rwlock state out of sync"),
+            }
+        } else {
+            Err(TryLockError::WouldBlock)
+        }
     }
 
     /// Attempts to acquire this rwlock with shared read access.
@@ -108,7 +131,23 @@ impl<T> RwLock<T> {
     /// If the access could not be granted at this time, then Err is returned. This function does
     /// not block.
     pub fn try_write(&self) -> TryLockResult<RwLockWriteGuard<'_, T>> {
-        unimplemented!()
+        if self.try_lock(RwLockType::Write) {
+            match self.inner.try_write() {
+                Ok(guard) => Ok(RwLockWriteGuard {
+                    inner: Some(guard),
+                    state: Rc::clone(&self.state),
+                    me: ExecutionState::me(),
+                }),
+                Err(TryLockError::Poisoned(err)) => Err(TryLockError::Poisoned(PoisonError::new(RwLockWriteGuard {
+                    inner: Some(err.into_inner()),
+                    state: Rc::clone(&self.state),
+                    me: ExecutionState::me(),
+                }))),
+                Err(TryLockError::WouldBlock) => panic!("rwlock state out of sync"),
+            }
+        } else {
+            Err(TryLockError::WouldBlock)
+        }
     }
 
     /// Consumes this `RwLock`, returning the underlying data
@@ -125,6 +164,7 @@ impl<T> RwLock<T> {
         self.inner.into_inner()
     }
 
+    /// Acquire the lock in the provided mode, blocking this thread until it succeeds.
     fn lock(&self, typ: RwLockType) {
         let me = ExecutionState::me();
 
@@ -133,7 +173,7 @@ impl<T> RwLock<T> {
             holder = ?state.holder,
             waiting_readers = ?state.waiting_readers,
             waiting_writers = ?state.waiting_writers,
-            "waiting to acquire {:?} lock on rwlock {:p}",
+            "acquiring {:?} lock on rwlock {:p}",
             typ,
             self.state,
         );
@@ -144,13 +184,18 @@ impl<T> RwLock<T> {
         } else {
             state.waiting_readers.insert(me);
         }
-        // Block if the lock is in a state where we can't acquire it immediately
-        match &state.holder {
+        // Block if the lock is in a state where we can't acquire it immediately. Note that we only
+        // need to context switch here if we can't acquire the lock. If it's available for us to
+        // acquire, but there is also another thread `t` that wants to acquire it, then `t` must
+        // have been runnable when this thread was chosen to execute and could have been chosen
+        // instead.
+        let should_switch = match &state.holder {
             RwLockHolder::Write(writer) => {
                 if *writer == me {
                     panic!("deadlock! task {:?} tried to acquire a RwLock it already holds", me);
                 }
                 ExecutionState::with(|s| s.current_mut().block());
+                true
             }
             RwLockHolder::Read(readers) => {
                 if readers.contains(me) {
@@ -158,14 +203,18 @@ impl<T> RwLock<T> {
                 }
                 if typ == RwLockType::Write {
                     ExecutionState::with(|s| s.current_mut().block());
+                    true
+                } else {
+                    false
                 }
             }
-            _ => {}
-        }
+            RwLockHolder::None => false,
+        };
         drop(state);
 
-        // Acquiring a lock is a yield point
-        thread::switch();
+        if should_switch {
+            thread::switch();
+        }
 
         let mut state = self.state.borrow_mut();
         // Once the scheduler has resumed this thread, we are clear to take the lock.
We might @@ -203,14 +252,91 @@ impl RwLock { typ, self.state ); - // Update acquiring thread's clock with the clock stored in the RwLock - ExecutionState::with(|s| s.update_clock(&state.clock)); + + // Increment the current thread's clock and update this RwLock's clock to match. + // TODO we can likely do better here: there is no causality between multiple readers holding + // the lock at the same time. + ExecutionState::with(|s| { + s.update_clock(&state.clock); + state.clock.update(s.get_clock(me)); + }); // Block all other waiters, since we won the race to take this lock - // TODO a bit of a bummer that we have to do this (it would be cleaner if those threads - // TODO never become unblocked), but might need to track more state to avoid this. Self::block_waiters(&*state, me, typ); drop(state); + + // We need to let other threads in here so they may fail a `try_read` or `try_write`. This + // is the case because the current thread holding the lock might not have any further + // context switches until after releasing the lock. + thread::switch(); + } + + /// Attempt to acquire this lock in the provided mode, but without blocking. Returns `true` if + /// the lock was able to be acquired without blocking, or `false` otherwise. + fn try_lock(&self, typ: RwLockType) -> bool { + let me = ExecutionState::me(); + + let mut state = self.state.borrow_mut(); + trace!( + holder = ?state.holder, + waiting_readers = ?state.waiting_readers, + waiting_writers = ?state.waiting_writers, + "trying to acquire {:?} lock on rwlock {:p}", + typ, + self.state, + ); + + let acquired = match (typ, &mut state.holder) { + (RwLockType::Write, RwLockHolder::None) => { + state.holder = RwLockHolder::Write(me); + true + } + (RwLockType::Read, RwLockHolder::None) => { + let mut readers = TaskSet::new(); + readers.insert(me); + state.holder = RwLockHolder::Read(readers); + true + } + (RwLockType::Read, RwLockHolder::Read(readers)) => { + // If we already hold the read lock, `insert` returns false, which will cause this + // acquisition to fail with `WouldBlock` so we can diagnose potential deadlocks. + readers.insert(me) + } + _ => false, + }; + + trace!( + "{} {:?} lock on rwlock {:p}", + if acquired { "acquired" } else { "failed to acquire" }, + typ, + self.state, + ); + + // Update this thread's clock with the clock stored in the RwLock. + // We need to do the vector clock update even in the failing case, because there's a causal + // dependency: if the `try_lock` fails, the current thread `t1` knows that the thread `t2` + // that owns the lock is not in the right state to be read/written, and therefore `t1` has a + // causal dependency on everything that happened before in `t2` (which is recorded in the + // RwLock's clock). + // TODO we can likely do better here: there is no causality between successful `try_read`s + // and other concurrent readers, and there's no need to update the clock on failed + // `try_read`s. + ExecutionState::with(|s| { + s.update_clock(&state.clock); + state.clock.update(s.get_clock(me)); + }); + + // Block all other waiters, since we won the race to take this lock + Self::block_waiters(&*state, me, typ); + drop(state); + + // We need to let other threads in here so they + // (a) may fail a `try_lock` (in case we acquired), or + // (b) may release the lock (in case we failed to acquire) so we can succeed in a subsequent + // `try_lock`. 
+ thread::switch(); + + acquired } fn block_waiters(state: &RwLockState, me: TaskId, typ: RwLockType) { @@ -324,7 +450,7 @@ impl Drop for RwLockReadGuard<'_, T> { state.holder = RwLockHolder::None; } } - _ => panic!("exiting a reader but rwlock is in the wrong state"), + _ => panic!("exiting a reader but rwlock is in the wrong state {:?}", state.holder), } if ExecutionState::should_stop() { diff --git a/tests/basic/clocks.rs b/tests/basic/clocks.rs index d51f3bce..a9a96d86 100644 --- a/tests/basic/clocks.rs +++ b/tests/basic/clocks.rs @@ -1,7 +1,7 @@ use shuttle::sync::atomic::{AtomicBool, AtomicU32, Ordering}; use shuttle::sync::mpsc::{channel, sync_channel}; use shuttle::sync::{Barrier, Condvar, Mutex, Once, RwLock}; -use shuttle::{check_dfs, check_pct, current, thread}; +use shuttle::{check_dfs, check_pct, check_random, current, thread}; use std::collections::HashSet; use std::sync::Arc; use test_log::test; @@ -10,7 +10,7 @@ pub fn me() -> usize { usize::from(thread::current().id()) } -// TODO Maybe make this a macro so backtraces are more informative +#[track_caller] pub fn check_clock(f: impl Fn(usize, u32) -> bool) { for (i, &c) in current::clock().iter().enumerate() { assert!( @@ -76,32 +76,34 @@ fn clock_mutex_pct() { // RWLocks fn clock_rwlock(num_writers: usize, num_readers: usize) { - // This test checks that when a thread acquires a RwLock, it inherits the clocks - // of any writers that accessed the lock before it, but not the clocks from any readers. + // This test checks that when a thread acquires a RwLock, it inherits the clocks of writers that + // accessed the lock before it. It's the same as `clock_mutex`, except that readers don't update + // the set S, and aren't required to appear in the clock for future lock holders. // - // Test: create a rwlock-protected set, initialized with 0 (the id of the main thread) - // and spawn some writers and readers. Each thread does the following: - // (1) check that its own initial vector clock only has nonzero for the main thread (thread 0) - // (2w) [for writers only] acquire a write lock on the set and add its own thread id to it - // (2r) [for readers only] acquire a read lock on the set - // (3) read its own clock again, call this C - // (4) check that the only nonzero entries in C are for the threads in S and the current thread (for readers) - // - // Note: no dummy thread here since we're already checking that readers' clock entries are always zero - let mut set = HashSet::new(); - set.insert(0); - let set = Arc::new(RwLock::new(set)); + // TODO this test is pretty weak. Testing readers is hard because they race with each other; for + // example, a reader might see the clock update from another reader before that reader has a + // chance to update the set S. Causality is also pretty fuzzy for readers (see the TODOs in the + // RwLock implementation). So we don't test very much about them here. 
+ let set = Arc::new(std::sync::Mutex::new(HashSet::from([0]))); + let lock = Arc::new(RwLock::new(())); + + // Create dummy thread (should have id 1) + thread::spawn(|| { + assert_eq!(me(), 1usize); + }); // Spawn the writers let _thds = (0..num_writers) .map(|_| { let set = Arc::clone(&set); + let lock = Arc::clone(&lock); thread::spawn(move || { check_clock(|i, c| (c > 0) == (i == 0)); - let mut set = set.write().unwrap(); + let _guard = lock.write().unwrap(); + let mut set = set.lock().unwrap(); set.insert(me()); - // Check that the only nonzero clock entries are for the threads in the set - check_clock(|i, c| (c > 0) == set.contains(&i)); + assert!(!set.contains(&1)); // dummy thread is never in the set + check_clock(|i, c| !set.contains(&i) || (c > 0)); }) }) .collect::>(); @@ -110,11 +112,13 @@ fn clock_rwlock(num_writers: usize, num_readers: usize) { let _thds = (0..num_readers) .map(|_| { let set = Arc::clone(&set); + let lock = Arc::clone(&lock); thread::spawn(move || { check_clock(|i, c| (c > 0) == (i == 0)); - let set = set.read().unwrap(); - // Check that the only nonzero clock entries are for threads in the set and the current thread - check_clock(|i, c| (c > 0) == (i == me() || set.contains(&i))); + let _guard = lock.read().unwrap(); + let set = set.lock().unwrap(); + assert!(!set.contains(&1)); // dummy thread is never in the set + check_clock(|i, c| !set.contains(&i) || (c > 0)); }) }) .collect::>(); @@ -122,14 +126,19 @@ fn clock_rwlock(num_writers: usize, num_readers: usize) { #[test] fn clock_rwlock_dfs() { - // TODO 2 writers + 2 readers takes too long right now; once we reduce context switching, it should be feasible - check_dfs(|| clock_rwlock(2, 1), None); - check_dfs(|| clock_rwlock(1, 2), None); + // Unfortunately anything larger than this takes > 500k iterations, too slow to be useful :( + // But the PCT and random tests below buy us a much bigger search. + check_dfs(|| clock_rwlock(1, 1), None); } #[test] fn clock_rwlock_pct() { - check_pct(|| clock_rwlock(10, 20), 10_000, 3); + check_pct(|| clock_rwlock(4, 4), 10_000, 3); +} + +#[test] +fn clock_rwlock_random() { + check_random(|| clock_rwlock(4, 4), 10_000); } // Barrier @@ -336,7 +345,7 @@ fn clock_mpsc_bounded() { // The sender has sent a message, so its clock is nonzero let c1 = current::clock().get(1); assert!(c1 > 0); - let _ = rx.recv().unwrap(); + rx.recv().unwrap(); // The sender has sent another message, so its clock has increased assert!(current::clock().get(2) > c1); // Receive the remaining messages diff --git a/tests/basic/rwlock.rs b/tests/basic/rwlock.rs index a3107940..c90da6aa 100644 --- a/tests/basic/rwlock.rs +++ b/tests/basic/rwlock.rs @@ -1,8 +1,9 @@ use shuttle::scheduler::PctScheduler; use shuttle::sync::{mpsc::channel, RwLock}; -use shuttle::{check, check_random, thread, Runner}; +use shuttle::{check, check_dfs, check_random, thread, Runner}; +use std::collections::HashSet; use std::sync::atomic::{AtomicBool, AtomicUsize, Ordering}; -use std::sync::Arc; +use std::sync::{Arc, TryLockError}; use test_log::test; #[test] @@ -124,7 +125,7 @@ fn rwlock_two_writers() { // This test should never deadlock. 
#[test] fn rwlock_allows_multiple_readers() { - shuttle::check_dfs( + check_dfs( || { let lock1 = Arc::new(RwLock::new(1)); let lock2 = lock1.clone(); @@ -171,7 +172,7 @@ fn two_readers_and_one_writer() { #[test] fn rwlock_two_readers_and_one_writer_exhaustive() { - shuttle::check_dfs(two_readers_and_one_writer, None); + check_dfs(two_readers_and_one_writer, None); } #[test] @@ -183,7 +184,7 @@ fn rwlock_default() { } } - shuttle::check_dfs( + check_dfs( || { let point: RwLock = Default::default(); @@ -197,7 +198,7 @@ fn rwlock_default() { #[test] fn rwlock_into_inner() { - shuttle::check_dfs( + check_dfs( || { let lock = Arc::new(RwLock::new(0u64)); @@ -220,3 +221,336 @@ fn rwlock_into_inner() { None, ) } + +/// Two concurrent threads trying to do an atomic increment using `try_write`. +/// One `try_write` must succeed, while the other may or may not succeed. +/// Thus we expect to see final values 1 and 2. +#[test] +fn concurrent_try_increment() { + let observed_values = Arc::new(std::sync::Mutex::new(HashSet::new())); + let observed_values_clone = Arc::clone(&observed_values); + + check_dfs( + move || { + let lock = Arc::new(RwLock::new(0usize)); + + let threads = (0..2) + .map(|_| { + let lock = Arc::clone(&lock); + thread::spawn(move || { + match lock.try_write() { + Ok(mut guard) => { + *guard += 1; + } + Err(TryLockError::WouldBlock) => (), + Err(_) => panic!("unexpected TryLockError"), + }; + }) + }) + .collect::>(); + + for thd in threads { + thd.join().unwrap(); + } + + let value = Arc::try_unwrap(lock).unwrap().into_inner().unwrap(); + observed_values_clone.lock().unwrap().insert(value); + }, + None, + ); + + let observed_values = Arc::try_unwrap(observed_values).unwrap().into_inner().unwrap(); + assert_eq!(observed_values, HashSet::from([1, 2])); +} + +/// Run some threads that try to acquire the lock in both read and write modes, and check that any +/// execution allowed by our `RwLock` implementation is allowed by `std`. +#[test] +fn try_lock_implies_std() { + check_random( + move || { + let lock = Arc::new(RwLock::new(())); + let reference_lock = Arc::new(std::sync::RwLock::new(())); + + let threads = (0..3) + .map(|_| { + let lock = Arc::clone(&lock); + let reference_lock = Arc::clone(&reference_lock); + thread::spawn(move || { + for _ in 0..3 { + { + let _r = lock.try_read(); + if _r.is_ok() { + assert!(reference_lock.try_read().is_ok()); + } + } + { + let _w = lock.try_write(); + if _w.is_ok() { + assert!(reference_lock.try_write().is_ok()); + } + } + } + }) + }) + .collect::>(); + + for thd in threads { + thd.join().unwrap(); + } + }, + 5000, + ); +} + +/// Run some threads that try to acquire the lock in both read and write modes, and check that any +/// execution allowed by `std` is allowed by our `RwLock` implementation. (This implication isn't +/// true in general -- see `double_try_read` -- but is true for this test). 
+#[test] +fn try_lock_implied_by_std() { + check_random( + move || { + let lock = Arc::new(RwLock::new(())); + let reference_lock = Arc::new(std::sync::RwLock::new(())); + + let threads = (0..3) + .map(|_| { + let lock = Arc::clone(&lock); + let reference_lock = Arc::clone(&reference_lock); + thread::spawn(move || { + for _ in 0..5 { + { + let _r = reference_lock.try_read(); + if _r.is_ok() { + assert!(lock.try_read().is_ok()); + } + } + { + let _w = reference_lock.try_write(); + if _w.is_ok() { + assert!(lock.try_write().is_ok()); + } + } + } + }) + }) + .collect::>(); + + for thd in threads { + thd.join().unwrap(); + } + }, + 5000, + ); +} + +/// Three concurrent threads, one doing an atomic increment by 1 using `write`, one trying to do an +/// atomic increment by 1 followed by trying to do an atomic increment by 2 using `try_write`, and a +/// third that peeks at the value using `try_read.` The `write` must succeed, while each `try_write` +/// may or may not succeed. +#[test] +fn concurrent_write_try_write_try_read() { + let observed_values = Arc::new(std::sync::Mutex::new(HashSet::new())); + let observed_values_clone = Arc::clone(&observed_values); + + check_dfs( + move || { + let lock = Arc::new(RwLock::new(0usize)); + + let write_thread = { + let lock = Arc::clone(&lock); + thread::spawn(move || { + *lock.write().unwrap() += 1; + }) + }; + let try_write_thread = { + let lock = Arc::clone(&lock); + thread::spawn(move || { + for n in 1..3 { + match lock.try_write() { + Ok(mut guard) => { + *guard += n; + } + Err(TryLockError::WouldBlock) => (), + Err(_) => panic!("unexpected TryLockError"), + }; + } + }) + }; + + let read_value = match lock.try_read() { + Ok(guard) => Some(*guard), + Err(TryLockError::WouldBlock) => None, + Err(_) => panic!("unexpected TryLockError"), + }; + + write_thread.join().unwrap(); + try_write_thread.join().unwrap(); + + let final_value = Arc::try_unwrap(lock).unwrap().into_inner().unwrap(); + observed_values_clone.lock().unwrap().insert((final_value, read_value)); + }, + None, + ); + + let observed_values = Arc::try_unwrap(observed_values).unwrap().into_inner().unwrap(); + // The idea here is that the `try_read` can interleave anywhere between the (successful) writes, + // but can also just fail. + let expected_values = HashSet::from([ + // Both `try_write`s fail + (1, None), + (1, Some(0)), + (1, Some(1)), + // Second `try_write` fails + (2, None), + (2, Some(0)), + (2, Some(1)), + (2, Some(2)), + // First `try_write` fails + (3, None), + (3, Some(0)), + (3, Some(1)), + // (3, Some(2)), // If first `try_write` failed, the value of the lock is never 2 + (3, Some(3)), + // Both `try_write`s succeed + (4, None), + (4, Some(0)), + (4, Some(1)), + (4, Some(2)), + (4, Some(3)), + (4, Some(4)), + ]); + assert_eq!(observed_values, expected_values); +} + +/// This behavior is _sometimes_ allowed in `std`, but advised against, as it can lead to deadlocks +/// on some platforms if the second `read` races with another thread's `write`. We conservatively +/// rule it out in all cases to better detect potential deadlocks. 
+#[test] +#[should_panic(expected = "tried to acquire a RwLock it already holds")] +fn double_read() { + check_dfs( + || { + let rwlock = RwLock::new(()); + let _guard_1 = rwlock.read().unwrap(); + let _guard_2 = rwlock.read(); + }, + None, + ) +} + +#[test] +#[should_panic(expected = "tried to acquire a RwLock it already holds")] +fn double_write() { + check_dfs( + || { + let rwlock = RwLock::new(()); + let _guard_1 = rwlock.write().unwrap(); + let _guard_2 = rwlock.write(); + }, + None, + ) +} + +#[test] +#[should_panic(expected = "tried to acquire a RwLock it already holds")] +fn read_upgrade() { + check_dfs( + || { + let rwlock = RwLock::new(()); + let _guard_1 = rwlock.read().unwrap(); + let _guard_2 = rwlock.write(); + }, + None, + ) +} + +#[test] +#[should_panic(expected = "tried to acquire a RwLock it already holds")] +fn write_downgrade() { + check_dfs( + || { + let rwlock = RwLock::new(()); + let _guard_1 = rwlock.write().unwrap(); + let _guard_2 = rwlock.read(); + }, + None, + ) +} + +/// This behavior isn't consistent with `std`, which seems to suggest that `try_read` succeeds if +/// the current thread already holds a read lock. We assume it always fails so that we can more +/// easily diagnose potential deadlocks, especially with async tasks that might migrate across +/// threads in real implementations. +#[test] +fn double_try_read() { + check_dfs( + || { + let rwlock = RwLock::new(()); + let _guard_1 = rwlock.try_read().unwrap(); + assert!(matches!(rwlock.try_read(), Err(TryLockError::WouldBlock))); + }, + None, + ) +} + +/// As with `double_try_read`, this isn't consistent with `std`. +#[test] +fn read_try_read() { + check_dfs( + || { + let rwlock = RwLock::new(()); + let _guard_1 = rwlock.read().unwrap(); + assert!(matches!(rwlock.try_read(), Err(TryLockError::WouldBlock))); + }, + None, + ) +} + +#[test] +fn double_try_write() { + check_dfs( + || { + let rwlock = RwLock::new(()); + let _guard_1 = rwlock.try_write().unwrap(); + assert!(matches!(rwlock.try_write(), Err(TryLockError::WouldBlock))); + }, + None, + ) +} + +#[test] +fn write_try_write() { + check_dfs( + || { + let rwlock = RwLock::new(()); + let _guard_1 = rwlock.write().unwrap(); + assert!(matches!(rwlock.try_write(), Err(TryLockError::WouldBlock))); + }, + None, + ) +} + +#[test] +fn try_read_upgrade() { + check_dfs( + || { + let rwlock = RwLock::new(()); + let _guard_1 = rwlock.try_read().unwrap(); + assert!(matches!(rwlock.try_write(), Err(TryLockError::WouldBlock))); + }, + None, + ) +} + +#[test] +fn try_write_downgrade() { + check_dfs( + || { + let rwlock = RwLock::new(()); + let _guard_1 = rwlock.try_write().unwrap(); + assert!(matches!(rwlock.try_read(), Err(TryLockError::WouldBlock))); + }, + None, + ) +} diff --git a/tests/demo/async_match_deadlock.rs b/tests/demo/async_match_deadlock.rs index bd496cee..6af4eb9d 100644 --- a/tests/demo/async_match_deadlock.rs +++ b/tests/demo/async_match_deadlock.rs @@ -74,5 +74,5 @@ fn async_match_deadlock() { fn asynch_match_deadlock_replay() { // Deterministically replay a deadlocking execution so we can, for example, single-step through // it in a debugger. - shuttle::replay(|| tokio::block_on(main()), "91010b98deab88a3ea91d6e001202808") + shuttle::replay(|| tokio::block_on(main()), "91010cbbc0daf8c5a5a9b162a08a08") }