From 1d7d149779a7e0b525d74171b89d205993dba770 Mon Sep 17 00:00:00 2001
From: Taiki Endo
Date: Wed, 23 Oct 2019 21:08:15 +0900
Subject: [PATCH] Use simple spinlock in AtomicU64

---
 src/sync/atomic.rs   | 71 ++++++++++++++++++++++++++++++++++++++------
 src/task/blocking.rs |  5 ----
 src/task/task.rs     |  5 ----
 3 files changed, 62 insertions(+), 19 deletions(-)

diff --git a/src/sync/atomic.rs b/src/sync/atomic.rs
index 75f69806e..353934b39 100644
--- a/src/sync/atomic.rs
+++ b/src/sync/atomic.rs
@@ -11,33 +11,86 @@ mod imp {
 
 #[cfg(any(target_arch = "arm", target_arch = "mips", target_arch = "powerpc"))]
 mod imp {
-    use std::sync::atomic::Ordering;
-    use std::sync::Mutex;
+    use std::cell::UnsafeCell;
+    use std::ops::{Deref, DerefMut};
+    use std::sync::atomic::{AtomicBool, Ordering};
 
-    #[derive(Debug)]
-    pub(crate) struct AtomicU64(Mutex<u64>);
+    use crossbeam_utils::Backoff;
+
+    pub(crate) struct AtomicU64(Spinlock<u64>);
 
     impl AtomicU64 {
-        pub(crate) fn new(val: u64) -> Self {
-            Self(Mutex::new(val))
+        pub(crate) const fn new(val: u64) -> Self {
+            Self(Spinlock::new(val))
         }
 
         pub(crate) fn load(&self, _: Ordering) -> u64 {
-            *self.0.lock().unwrap()
+            *self.0.lock()
         }
 
         pub(crate) fn fetch_add(&self, val: u64, _: Ordering) -> u64 {
-            let mut lock = self.0.lock().unwrap();
+            let mut lock = self.0.lock();
             let prev = *lock;
             *lock = prev + val;
             prev
         }
 
         pub(crate) fn fetch_sub(&self, val: u64, _: Ordering) -> u64 {
-            let mut lock = self.0.lock().unwrap();
+            let mut lock = self.0.lock();
             let prev = *lock;
             *lock = prev - val;
             prev
         }
     }
+
+    /// A simple spinlock.
+    struct Spinlock<T> {
+        flag: AtomicBool,
+        value: UnsafeCell<T>,
+    }
+
+    unsafe impl<T: Send> Send for Spinlock<T> {}
+    unsafe impl<T: Send> Sync for Spinlock<T> {}
+
+    impl<T> Spinlock<T> {
+        /// Returns a new spinlock initialized with `value`.
+        const fn new(value: T) -> Self {
+            Self {
+                flag: AtomicBool::new(false),
+                value: UnsafeCell::new(value),
+            }
+        }
+
+        /// Locks the spinlock.
+        fn lock(&self) -> SpinlockGuard<'_, T> {
+            let backoff = Backoff::new();
+            while self.flag.swap(true, Ordering::Acquire) {
+                backoff.snooze();
+            }
+            SpinlockGuard(self)
+        }
+    }
+
+    /// A guard holding a spinlock locked.
+    struct SpinlockGuard<'a, T>(&'a Spinlock<T>);
+
+    impl<T> Drop for SpinlockGuard<'_, T> {
+        fn drop(&mut self) {
+            self.0.flag.store(false, Ordering::Release);
+        }
+    }
+
+    impl<T> Deref for SpinlockGuard<'_, T> {
+        type Target = T;
+
+        fn deref(&self) -> &T {
+            unsafe { &*self.0.value.get() }
+        }
+    }
+
+    impl<T> DerefMut for SpinlockGuard<'_, T> {
+        fn deref_mut(&mut self) -> &mut T {
+            unsafe { &mut *self.0.value.get() }
+        }
+    }
 }
diff --git a/src/task/blocking.rs b/src/task/blocking.rs
index 84ba3d130..a4d7eb24e 100644
--- a/src/task/blocking.rs
+++ b/src/task/blocking.rs
@@ -13,11 +13,6 @@ use crate::utils::abort_on_panic;
 
 const MAX_THREADS: u64 = 10_000;
 
-#[cfg(any(target_arch = "arm", target_arch = "mips", target_arch = "powerpc"))]
-lazy_static! {
-    static ref DYNAMIC_THREAD_COUNT: AtomicU64 = AtomicU64::new(0);
-}
-#[cfg(not(any(target_arch = "arm", target_arch = "mips", target_arch = "powerpc")))]
 static DYNAMIC_THREAD_COUNT: AtomicU64 = AtomicU64::new(0);
 
 struct Pool {
diff --git a/src/task/task.rs b/src/task/task.rs
index 2e90bbd18..d5ac2c127 100644
--- a/src/task/task.rs
+++ b/src/task/task.rs
@@ -113,11 +113,6 @@ pub struct TaskId(NonZeroU64);
 
 impl TaskId {
     pub(crate) fn new() -> TaskId {
-        #[cfg(any(target_arch = "arm", target_arch = "mips", target_arch = "powerpc"))]
-        lazy_static::lazy_static! {
-            static ref COUNTER: AtomicU64 = AtomicU64::new(1);
-        }
-        #[cfg(not(any(target_arch = "arm", target_arch = "mips", target_arch = "powerpc")))]
         static COUNTER: AtomicU64 = AtomicU64::new(1);
 
         let id = COUNTER.fetch_add(1, Ordering::Relaxed);
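Note (not part of the patch): for anyone who wants to try the technique
outside the tree, here is a minimal standalone sketch of the same
swap-based spinlock guarding a plain u64. It substitutes
std::hint::spin_loop for crossbeam's Backoff::snooze so it builds with no
external dependencies (Backoff additionally yields to the OS scheduler
after spinning for a while); the SpinU64 name and the main driver are
illustrative only.

use std::cell::UnsafeCell;
use std::sync::atomic::{AtomicBool, Ordering};
use std::thread;

// A 64-bit counter guarded by a swap-based spinlock, mirroring the
// patch's Spinlock<u64> but monomorphized for brevity.
struct SpinU64 {
    flag: AtomicBool,       // true while some thread holds the lock
    value: UnsafeCell<u64>, // the emulated 64-bit atomic
}

// Sound because `value` is only accessed while `flag` is held.
unsafe impl Sync for SpinU64 {}

impl SpinU64 {
    const fn new(val: u64) -> Self {
        Self {
            flag: AtomicBool::new(false),
            value: UnsafeCell::new(val),
        }
    }

    fn fetch_add(&self, val: u64) -> u64 {
        // Acquire: swap returns the previous flag value, so `true`
        // means another thread still holds the lock.
        while self.flag.swap(true, Ordering::Acquire) {
            std::hint::spin_loop();
        }
        let prev = unsafe {
            let p = self.value.get();
            let prev = *p;
            *p = prev + val;
            prev
        };
        // Release: publishes the write above before the next holder's
        // Acquire swap observes `flag == false`.
        self.flag.store(false, Ordering::Release);
        prev
    }
}

fn main() {
    static COUNTER: SpinU64 = SpinU64::new(0);
    let handles: Vec<_> = (0..4)
        .map(|_| {
            thread::spawn(|| {
                for _ in 0..100_000 {
                    COUNTER.fetch_add(1);
                }
            })
        })
        .collect();
    for h in handles {
        h.join().unwrap();
    }
    // All 400_000 increments are observed; no torn updates.
    assert_eq!(COUNTER.fetch_add(0), 400_000);
}

The Acquire/Release pair on `flag` is what keeps the unsynchronized u64
accesses data-race-free: every read and write of `value` happens between
a successful Acquire swap and the matching Release store.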