lock_api: Make RawMutexFair::unlock_fair() and RawMutexFair::bump() unsafe.
Thomas Bächler committed Jun 17, 2020
1 parent 14e28ca commit 4015183
Showing 4 changed files with 56 additions and 17 deletions.
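
For code that drives a `RawMutexFair` implementation directly, the practical effect of this change is that fair unlocking now requires an `unsafe` block in which the caller asserts that the lock is held, mirroring the existing contract of `RawMutex::unlock`. A minimal sketch of a hypothetical downstream caller (not part of this commit):

```rust
use lock_api::{RawMutex, RawMutexFair};

// Hypothetical helper: acquire the raw lock, run a closure, then hand the
// mutex off fairly to any waiting thread.
fn with_fair_handoff<R: RawMutexFair>(raw: &R, work: impl FnOnce()) {
    raw.lock();
    work();
    // Safety: the lock was acquired above and is still held here, satisfying
    // the new safety requirement on `unlock_fair`.
    unsafe {
        raw.unlock_fair();
    }
}
```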
33 changes: 27 additions & 6 deletions lock_api/src/mutex.rs
@@ -79,14 +79,23 @@ pub unsafe trait RawMutex {
/// unlocking, but may be necessary in certain circumstances.
pub unsafe trait RawMutexFair: RawMutex {
/// Unlocks this mutex using a fair unlock protocol.
fn unlock_fair(&self);
///
/// # Safety
///
/// This method may only be called if the mutex is held in the current context, see
/// the documentation of [`unlock`].
///
/// [`unlock`]: trait.RawMutex.html#tymethod.unlock
unsafe fn unlock_fair(&self);

/// Temporarily yields the mutex to a waiting thread if there is one.
///
/// This method is functionally equivalent to calling `unlock_fair` followed
/// by `lock`, however it can be much more efficient in the case where there
/// are no waiting threads.
fn bump(&self) {
///
/// [`unlock`]: trait.RawMutex.html#tymethod.unlock
unsafe fn bump(&self) {
self.unlock_fair();
self.lock();
}
@@ -473,7 +482,10 @@ impl<'a, R: RawMutexFair + 'a, T: ?Sized + 'a> MutexGuard<'a, R, T> {
/// using this method instead of dropping the `MutexGuard` normally.
#[inline]
pub fn unlock_fair(s: Self) {
s.mutex.raw.unlock_fair();
// Safety: A MutexGuard always holds the lock.
unsafe {
s.mutex.raw.unlock_fair();
}
mem::forget(s);
}

@@ -488,7 +500,10 @@ impl<'a, R: RawMutexFair + 'a, T: ?Sized + 'a> MutexGuard<'a, R, T> {
where
F: FnOnce() -> U,
{
s.mutex.raw.unlock_fair();
// Safety: A MutexGuard always holds the lock.
unsafe {
s.mutex.raw.unlock_fair();
}
defer!(s.mutex.raw.lock());
f()
}
@@ -500,7 +515,10 @@ impl<'a, R: RawMutexFair + 'a, T: ?Sized + 'a> MutexGuard<'a, R, T> {
/// are no waiting threads.
#[inline]
pub fn bump(s: &mut Self) {
s.mutex.raw.bump();
// Safety: A MutexGuard always holds the lock.
unsafe {
s.mutex.raw.bump();
}
}
}

@@ -634,7 +652,10 @@ impl<'a, R: RawMutexFair + 'a, T: ?Sized + 'a> MappedMutexGuard<'a, R, T> {
/// using this method instead of dropping the `MutexGuard` normally.
#[inline]
pub fn unlock_fair(s: Self) {
s.raw.unlock_fair();
// Safety: A MutexGuard always holds the lock.
unsafe {
s.raw.unlock_fair();
}
mem::forget(s);
}
}
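
At the guard level the API stays safe: a live `MutexGuard` is itself the proof that the lock is held, which is why the new `unsafe` blocks above are justified by a one-line `// Safety:` comment. Guard-level usage (shown here with `parking_lot::Mutex`, which is built on these types) is unchanged:

```rust
use parking_lot::{Mutex, MutexGuard};

fn main() {
    let counter = Mutex::new(0u32);
    let mut guard = counter.lock();
    *guard += 1;
    // Fair unlock through the guard: still a safe call, because the guard's
    // existence guarantees the lock is held by this context.
    MutexGuard::unlock_fair(guard);
}
```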
30 changes: 24 additions & 6 deletions lock_api/src/remutex.rs
@@ -141,8 +141,12 @@ impl<R: RawMutexFair, G: GetThreadId> RawReentrantMutex<R, G> {
/// Unlocks this mutex using a fair unlock protocol. The inner mutex
/// may not be unlocked if this mutex was acquired previously in the
/// current thread.
///
/// # Safety
///
/// This method may only be called if the mutex is held by the current thread.
#[inline]
pub fn unlock_fair(&self) {
pub unsafe fn unlock_fair(&self) {
let lock_count = self.lock_count.get() - 1;
self.lock_count.set(lock_count);
if lock_count == 0 {
@@ -156,8 +160,10 @@ impl<R: RawMutexFair, G: GetThreadId> RawReentrantMutex<R, G> {
/// This method is functionally equivalent to calling `unlock_fair` followed
/// by `lock`, however it can be much more efficient in the case where there
/// are no waiting threads.
///
/// This method may only be called if the mutex is held by the current thread.
#[inline]
pub fn bump(&self) {
pub unsafe fn bump(&self) {
if self.lock_count.get() == 1 {
let id = self.owner.load(Ordering::Relaxed);
self.owner.store(0, Ordering::Relaxed);
@@ -584,7 +590,10 @@ impl<'a, R: RawMutexFair + 'a, G: GetThreadId + 'a, T: ?Sized + 'a>
/// using this method instead of dropping the `ReentrantMutexGuard` normally.
#[inline]
pub fn unlock_fair(s: Self) {
s.remutex.raw.unlock_fair();
// Safety: A ReentrantMutexGuard always holds the lock
unsafe {
s.remutex.raw.unlock_fair();
}
mem::forget(s);
}

@@ -599,7 +608,10 @@ impl<'a, R: RawMutexFair + 'a, G: GetThreadId + 'a, T: ?Sized + 'a>
where
F: FnOnce() -> U,
{
s.remutex.raw.unlock_fair();
// Safety: A ReentrantMutexGuard always holds the lock
unsafe {
s.remutex.raw.unlock_fair();
}
defer!(s.remutex.raw.lock());
f()
}
@@ -611,7 +623,10 @@ impl<'a, R: RawMutexFair + 'a, G: GetThreadId + 'a, T: ?Sized + 'a>
/// are no waiting threads.
#[inline]
pub fn bump(s: &mut Self) {
s.remutex.raw.bump();
// Safety: A ReentrantMutexGuard always holds the lock
unsafe {
s.remutex.raw.bump();
}
}
}

@@ -752,7 +767,10 @@ impl<'a, R: RawMutexFair + 'a, G: GetThreadId + 'a, T: ?Sized + 'a>
/// using this method instead of dropping the `ReentrantMutexGuard` normally.
#[inline]
pub fn unlock_fair(s: Self) {
s.raw.unlock_fair();
// Safety: A MappedReentrantMutexGuard always holds the lock
unsafe {
s.raw.unlock_fair();
}
mem::forget(s);
}
}
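
The reentrant variant keeps the same shape: the raw `unlock_fair` and `bump` only touch the inner mutex once the lock count drops to zero, and the guard-level methods stay safe. An illustrative sketch using `parking_lot::ReentrantMutex` (not part of this commit):

```rust
use parking_lot::{ReentrantMutex, ReentrantMutexGuard};

fn main() {
    let lock = ReentrantMutex::new(());
    let outer = lock.lock();
    let inner = lock.lock(); // the same thread may re-acquire
    // Fairly releasing the inner guard only decrements the lock count; the
    // underlying mutex is handed off to a waiter once the outer guard is
    // released as well.
    ReentrantMutexGuard::unlock_fair(inner);
    ReentrantMutexGuard::unlock_fair(outer);
}
```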
4 changes: 2 additions & 2 deletions src/raw_fair_mutex.rs
@@ -39,12 +39,12 @@ unsafe impl lock_api::RawMutex for RawFairMutex {

unsafe impl lock_api::RawMutexFair for RawFairMutex {
#[inline]
fn unlock_fair(&self) {
unsafe fn unlock_fair(&self) {
self.0.unlock_fair()
}

#[inline]
fn bump(&self) {
unsafe fn bump(&self) {
self.0.bump()
}
}
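
Custom raw locks that implement `RawMutexFair` need the same signature change. A minimal sketch for a hypothetical spinlock-backed raw mutex (illustrative only; it has no wait queue, so there is no real fairness to implement):

```rust
use lock_api::{GuardSend, RawMutex, RawMutexFair};
use std::sync::atomic::{AtomicBool, Ordering};

pub struct RawSpinlock(AtomicBool);

unsafe impl RawMutex for RawSpinlock {
    const INIT: Self = RawSpinlock(AtomicBool::new(false));
    type GuardMarker = GuardSend;

    fn lock(&self) {
        // Spin until the flag is successfully flipped from false to true.
        while self
            .0
            .compare_exchange_weak(false, true, Ordering::Acquire, Ordering::Relaxed)
            .is_err()
        {
            std::hint::spin_loop();
        }
    }

    fn try_lock(&self) -> bool {
        self.0
            .compare_exchange(false, true, Ordering::Acquire, Ordering::Relaxed)
            .is_ok()
    }

    unsafe fn unlock(&self) {
        self.0.store(false, Ordering::Release);
    }
}

unsafe impl RawMutexFair for RawSpinlock {
    // With no parked waiters to hand off to, a "fair" unlock is just a plain
    // release; the signature is now `unsafe`, matching the updated trait.
    unsafe fn unlock_fair(&self) {
        self.0.store(false, Ordering::Release);
    }
    // The provided default for `bump` (unlock_fair followed by lock) suffices.
}
```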
6 changes: 3 additions & 3 deletions src/raw_mutex.rs
@@ -118,8 +118,8 @@ unsafe impl lock_api::RawMutex for RawMutex {

unsafe impl lock_api::RawMutexFair for RawMutex {
#[inline]
fn unlock_fair(&self) {
unsafe { deadlock::release_resource(self as *const _ as usize) };
unsafe fn unlock_fair(&self) {
deadlock::release_resource(self as *const _ as usize);
if self
.state
.compare_exchange(LOCKED_BIT, 0, Ordering::Release, Ordering::Relaxed)
@@ -131,7 +131,7 @@ unsafe impl lock_api::RawMutexFair for RawMutex {
}

#[inline]
fn bump(&self) {
unsafe fn bump(&self) {
if self.state.load(Ordering::Relaxed) & PARKED_BIT != 0 {
self.bump_slow();
}
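
`bump` exists so that long critical sections can periodically yield to waiting threads without paying for a full unlock/lock cycle when nobody is parked; the `PARKED_BIT` check above is that fast path. A guard-level usage sketch (illustrative):

```rust
use parking_lot::{Mutex, MutexGuard};

fn main() {
    let data = Mutex::new(Vec::new());
    let mut guard = data.lock();
    for i in 0..10_000u32 {
        guard.push(i);
        // Give any parked thread a fair shot at the lock every so often;
        // when nobody is waiting this is only a cheap atomic check.
        if i % 1_000 == 0 {
            MutexGuard::bump(&mut guard);
        }
    }
}
```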
