diff --git a/iceoryx2-pal/concurrency-sync/Cargo.toml b/iceoryx2-pal/concurrency-sync/Cargo.toml
index d0adb5c7a..0b68516a7 100644
--- a/iceoryx2-pal/concurrency-sync/Cargo.toml
+++ b/iceoryx2-pal/concurrency-sync/Cargo.toml
@@ -14,3 +14,4 @@ version = { workspace = true }
 
 [dev-dependencies]
 iceoryx2-bb-testing = { workspace = true }
+generic-tests = { workspace = true }
diff --git a/iceoryx2-pal/concurrency-sync/src/atomic.rs b/iceoryx2-pal/concurrency-sync/src/atomic.rs
deleted file mode 100644
index 11b1eeef5..000000000
--- a/iceoryx2-pal/concurrency-sync/src/atomic.rs
+++ /dev/null
@@ -1,203 +0,0 @@
-// Copyright (c) 2024 Contributors to the Eclipse Foundation
-//
-// See the NOTICE file(s) distributed with this work for additional
-// information regarding copyright ownership.
-//
-// This program and the accompanying materials are made available under the
-// terms of the Apache Software License 2.0 which is available at
-// https://www.apache.org/licenses/LICENSE-2.0, or the MIT license
-// which is available at https://opensource.org/licenses/MIT.
-//
-// SPDX-License-Identifier: Apache-2.0 OR MIT
-
-use core::{
-    cell::UnsafeCell,
-    ops::{AddAssign, BitAndAssign, BitOrAssign, BitXorAssign, Not, SubAssign},
-    sync::atomic::Ordering,
-};
-
-use crate::{rwlock::RwLockWriterPreference, WaitAction};
-
-pub type IceAtomicBool = core::sync::atomic::AtomicBool;
-pub type IceAtomicUsize = core::sync::atomic::AtomicUsize;
-
-pub type IceAtomicU8 = core::sync::atomic::AtomicU8;
-pub type IceAtomicU16 = core::sync::atomic::AtomicU16;
-pub type IceAtomicU32 = core::sync::atomic::AtomicU32;
-pub type IceAtomicI8 = core::sync::atomic::AtomicI8;
-pub type IceAtomicI16 = core::sync::atomic::AtomicI16;
-pub type IceAtomicI32 = core::sync::atomic::AtomicI32;
-
-#[cfg(target_pointer_width = "64")]
-pub type IceAtomicI64 = core::sync::atomic::AtomicI64;
-
-#[cfg(target_pointer_width = "64")]
-pub type IceAtomicU64 = core::sync::atomic::AtomicU64;
-
-#[cfg(target_pointer_width = "32")]
-pub type IceAtomicI64 = IceAtomic<i64>;
-
-#[cfg(target_pointer_width = "32")]
-pub type IceAtomicU64 = IceAtomic<u64>;
-
-type LockType = RwLockWriterPreference;
-
-#[repr(C)]
-pub struct IceAtomic<T> {
-    data: UnsafeCell<T>,
-    lock: LockType,
-}
-
-impl<
-        T: Copy
-            + Send
-            + Eq
-            + AddAssign
-            + SubAssign
-            + BitAndAssign
-            + BitOrAssign
-            + BitXorAssign
-            + Ord
-            + Not<Output = T>,
-    > IceAtomic<T>
-{
-    pub fn new(v: T) -> Self {
-        Self {
-            data: UnsafeCell::new(v),
-            lock: LockType::new(),
-        }
-    }
-
-    fn read_lock(&self) {
-        self.lock.read_lock(|_, _| WaitAction::Continue);
-    }
-
-    fn write_lock(&self) {
-        self.lock
-            .write_lock(|_, _| WaitAction::Continue, |_| {}, |_| {});
-    }
-
-    fn unlock(&self) {
-        self.lock.unlock(|_| {}, |_| {});
-    }
-
-    pub const fn as_ptr(&self) -> *mut T {
-        self.data.get()
-    }
-
-    pub fn compare_exchange(
-        &self,
-        current: T,
-        new: T,
-        success: Ordering,
-        failure: Ordering,
-    ) -> Result<T, T> {
-        self.write_lock();
-        let data = unsafe { *self.data.get() };
-        if data != current {
-            core::sync::atomic::fence(failure);
-            self.unlock();
-            return Err(data);
-        }
-
-        unsafe { *self.data.get() = new };
-        core::sync::atomic::fence(success);
-        self.unlock();
-        Ok(data)
-    }
-
-    pub fn compare_exchange_weak(
-        &self,
-        current: T,
-        new: T,
-        success: Ordering,
-        failure: Ordering,
-    ) -> Result<T, T> {
-        self.compare_exchange(current, new, success, failure)
-    }
-
-    fn fetch_op<F: FnOnce()>(&self, op: F, order: Ordering) -> T {
-        self.write_lock();
-        let data = unsafe { *self.data.get() };
-        op();
-        core::sync::atomic::fence(order);
-        self.unlock();
-        data
-    }
-
-    pub fn fetch_add(&self, value: T, order: Ordering) -> T {
-        self.fetch_op(|| unsafe { *self.data.get() += value }, order)
-    }
-
-    pub fn fetch_and(&self, value: T, order: Ordering) -> T {
-        self.fetch_op(|| unsafe { *self.data.get() &= value }, order)
-    }
-
-    pub fn fetch_max(&self, value: T, order: Ordering) -> T {
-        self.fetch_op(
-            || {
-                let data = unsafe { *self.data.get() };
-                unsafe { *self.data.get() = data.max(value) }
-            },
-            order,
-        )
-    }
-
-    pub fn fetch_min(&self, value: T, order: Ordering) -> T {
-        self.fetch_op(
-            || {
-                let data = unsafe { *self.data.get() };
-                unsafe { *self.data.get() = data.min(value) }
-            },
-            order,
-        )
-    }
-
-    pub fn fetch_nand(&self, value: T, order: Ordering) -> T {
-        self.fetch_op(|| unsafe { *self.data.get() &= !value }, order)
-    }
-
-    pub fn fetch_or(&self, value: T, order: Ordering) -> T {
-        self.fetch_op(|| unsafe { *self.data.get() |= value }, order)
-    }
-
-    pub fn fetch_sub(&self, value: T, order: Ordering) -> T {
-        self.fetch_op(|| unsafe { *self.data.get() -= value }, order)
-    }
-
-    pub fn fetch_update(&self, value: T, order: Ordering) -> T {
-        self.fetch_op(|| unsafe { *self.data.get() = value }, order)
-    }
-
-    pub fn fetch_xor(&self, value: T, order: Ordering) -> T {
-        self.fetch_op(|| unsafe { *self.data.get() ^= value }, order)
-    }
-
-    pub fn into_innter(self) -> T {
-        unsafe { *self.data.get() }
-    }
-
-    pub fn load(&self, order: Ordering) -> T {
-        self.read_lock();
-        let data = unsafe { *self.data.get() };
-        core::sync::atomic::fence(order);
-        self.unlock();
-        data
-    }
-
-    pub fn store(&self, value: T, order: Ordering) {
-        self.write_lock();
-        unsafe { *self.data.get() = value };
-        core::sync::atomic::fence(order);
-        self.unlock();
-    }
-
-    pub fn swap(&self, value: T, order: Ordering) -> T {
-        self.write_lock();
-        let data = unsafe { *self.data.get() };
-        unsafe { *self.data.get() = value };
-        core::sync::atomic::fence(order);
-        self.unlock();
-        data
-    }
-}
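Note on the arithmetic change in the replacement module that follows: fetch_add and fetch_sub now go through overflowing_add/overflowing_sub instead of += and -=. Plain integer arithmetic panics on overflow in debug builds, whereas hardware atomics wrap; the overflowing_* methods wrap in every build profile. A minimal standalone sketch of the difference, using plain u64 and not tied to this crate:

    fn main() {
        let x = u64::MAX;

        // overflowing_add wraps in every build profile and reports the
        // overflow, matching the wrap-around of AtomicU64::fetch_add.
        let (wrapped, overflowed) = x.overflowing_add(1);
        assert_eq!(wrapped, 0);
        assert!(overflowed);

        // checked_add makes the failure explicit; an unchecked `x + 1`
        // would panic in debug builds and silently wrap in release.
        assert_eq!(x.checked_add(1), None);
    }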
diff --git a/iceoryx2-pal/concurrency-sync/src/ice_atomic.rs b/iceoryx2-pal/concurrency-sync/src/ice_atomic.rs
new file mode 100644
index 000000000..e88db2510
--- /dev/null
+++ b/iceoryx2-pal/concurrency-sync/src/ice_atomic.rs
@@ -0,0 +1,301 @@
+// Copyright (c) 2024 Contributors to the Eclipse Foundation
+//
+// See the NOTICE file(s) distributed with this work for additional
+// information regarding copyright ownership.
+//
+// This program and the accompanying materials are made available under the
+// terms of the Apache Software License 2.0 which is available at
+// https://www.apache.org/licenses/LICENSE-2.0, or the MIT license
+// which is available at https://opensource.org/licenses/MIT.
+//
+// SPDX-License-Identifier: Apache-2.0 OR MIT
+
+use core::{
+    cell::UnsafeCell,
+    ops::{AddAssign, BitAndAssign, BitOrAssign, BitXorAssign, Not, SubAssign},
+    sync::atomic::Ordering,
+};
+
+use crate::{rwlock::RwLockWriterPreference, WaitAction};
+
+pub type IceAtomicBool = core::sync::atomic::AtomicBool;
+pub type IceAtomicUsize = core::sync::atomic::AtomicUsize;
+
+pub type IceAtomicU8 = core::sync::atomic::AtomicU8;
+pub type IceAtomicU16 = core::sync::atomic::AtomicU16;
+pub type IceAtomicU32 = core::sync::atomic::AtomicU32;
+pub type IceAtomicI8 = core::sync::atomic::AtomicI8;
+pub type IceAtomicI16 = core::sync::atomic::AtomicI16;
+pub type IceAtomicI32 = core::sync::atomic::AtomicI32;
+
+#[cfg(target_pointer_width = "64")]
+pub type IceAtomicI64 = core::sync::atomic::AtomicI64;
+
+#[cfg(target_pointer_width = "64")]
+pub type IceAtomicU64 = core::sync::atomic::AtomicU64;
+
+#[cfg(target_pointer_width = "32")]
+pub type IceAtomicI64 = IceAtomic<i64>;
+
+#[cfg(target_pointer_width = "32")]
+pub type IceAtomicU64 = IceAtomic<u64>;
+
+type LockType = RwLockWriterPreference;
+
+pub mod internal {
+    use super::*;
+
+    pub trait AtomicInteger:
+        Copy
+        + Send
+        + Eq
+        + AddAssign
+        + SubAssign
+        + BitAndAssign
+        + BitOrAssign
+        + BitXorAssign
+        + Ord
+        + Not<Output = Self>
+    {
+        fn overflowing_add(self, rhs: Self) -> (Self, bool);
+        fn overflowing_sub(self, rhs: Self) -> (Self, bool);
+    }
+
+    impl AtomicInteger for u64 {
+        fn overflowing_add(self, rhs: Self) -> (Self, bool) {
+            self.overflowing_add(rhs)
+        }
+
+        fn overflowing_sub(self, rhs: Self) -> (Self, bool) {
+            self.overflowing_sub(rhs)
+        }
+    }
+
+    impl AtomicInteger for u128 {
+        fn overflowing_add(self, rhs: Self) -> (Self, bool) {
+            self.overflowing_add(rhs)
+        }
+
+        fn overflowing_sub(self, rhs: Self) -> (Self, bool) {
+            self.overflowing_sub(rhs)
+        }
+    }
+
+    impl AtomicInteger for i64 {
+        fn overflowing_add(self, rhs: Self) -> (Self, bool) {
+            self.overflowing_add(rhs)
+        }
+
+        fn overflowing_sub(self, rhs: Self) -> (Self, bool) {
+            self.overflowing_sub(rhs)
+        }
+    }
+
+    impl AtomicInteger for i128 {
+        fn overflowing_add(self, rhs: Self) -> (Self, bool) {
+            self.overflowing_add(rhs)
+        }
+
+        fn overflowing_sub(self, rhs: Self) -> (Self, bool) {
+            self.overflowing_sub(rhs)
+        }
+    }
+}
+
+#[repr(C)]
+pub struct IceAtomic<T: internal::AtomicInteger> {
+    data: UnsafeCell<T>,
+    lock: LockType,
+}
+
+impl<T: internal::AtomicInteger> IceAtomic<T> {
+    pub fn new(v: T) -> Self {
+        Self {
+            data: UnsafeCell::new(v),
+            lock: LockType::new(),
+        }
+    }
+
+    fn read_lock(&self) {
+        self.lock.read_lock(|_, _| WaitAction::Continue);
+    }
+
+    fn write_lock(&self) {
+        self.lock
+            .write_lock(|_, _| WaitAction::Continue, |_| {}, |_| {});
+    }
+
+    fn unlock(&self) {
+        self.lock.unlock(|_| {}, |_| {});
+    }
+
+    pub const fn as_ptr(&self) -> *mut T {
+        self.data.get()
+    }
+
+    pub fn compare_exchange(
+        &self,
+        current: T,
+        new: T,
+        _success: Ordering,
+        _failure: Ordering,
+    ) -> Result<T, T> {
+        self.write_lock();
+        let data = unsafe { *self.data.get() };
+        if data != current {
+            core::sync::atomic::fence(Ordering::SeqCst);
+            self.unlock();
+            return Err(data);
+        }
+
+        unsafe { *self.data.get() = new };
+        core::sync::atomic::fence(Ordering::SeqCst);
+        self.unlock();
+        Ok(data)
+    }
+
+    pub fn compare_exchange_weak(
+        &self,
+        current: T,
+        new: T,
+        success: Ordering,
+        failure: Ordering,
+    ) -> Result<T, T> {
+        self.compare_exchange(current, new, success, failure)
+    }
+
+    fn fetch_op<F: FnOnce() -> T>(&self, op: F, _order: Ordering) -> T {
+        self.write_lock();
+        let data = op();
+        core::sync::atomic::fence(Ordering::SeqCst);
+        self.unlock();
+        data
+    }
+
+    pub fn fetch_add(&self, value: T, order: Ordering) -> T {
+        self.fetch_op(
+            || {
+                let old = unsafe { *self.data.get() };
+                unsafe { *self.data.get() = old.overflowing_add(value).0 };
+                old
+            },
+            order,
+        )
+    }
+
+    pub fn fetch_and(&self, value: T, order: Ordering) -> T {
+        self.fetch_op(
+            || {
+                let old = unsafe { *self.data.get() };
+                unsafe { *self.data.get() &= value };
+                old
+            },
+            order,
+        )
+    }
+
+    pub fn fetch_max(&self, value: T, order: Ordering) -> T {
+        self.fetch_op(
+            || {
+                let old = unsafe { *self.data.get() };
+                unsafe { *self.data.get() = old.max(value) };
+                old
+            },
+            order,
+        )
+    }
+
+    pub fn fetch_min(&self, value: T, order: Ordering) -> T {
+        self.fetch_op(
+            || {
+                let old = unsafe { *self.data.get() };
+                unsafe { *self.data.get() = old.min(value) };
+                old
+            },
+            order,
+        )
+    }
+
+    pub fn fetch_nand(&self, value: T, order: Ordering) -> T {
+        self.fetch_op(
+            || {
+                let old = unsafe { *self.data.get() };
+                let mut new = old;
+                new &= value;
+                // NAND stores the complement of (old & value), matching
+                // the fetch_nand semantics of core::sync::atomic.
+                unsafe { *self.data.get() = !new };
+                old
+            },
+            order,
+        )
+    }
+
+    pub fn fetch_or(&self, value: T, order: Ordering) -> T {
+        self.fetch_op(
+            || {
+                let old = unsafe { *self.data.get() };
+                unsafe { *self.data.get() |= value };
+                old
+            },
+            order,
+        )
+    }
+
+    pub fn fetch_sub(&self, value: T, order: Ordering) -> T {
+        self.fetch_op(
+            || {
+                let old = unsafe { *self.data.get() };
+                unsafe { *self.data.get() = old.overflowing_sub(value).0 };
+                old
+            },
+            order,
+        )
+    }
+
+    pub fn fetch_update(&self, value: T, order: Ordering) -> T {
+        self.fetch_op(
+            || {
+                let old = unsafe { *self.data.get() };
+                unsafe { *self.data.get() = value };
+                old
+            },
+            order,
+        )
+    }
+
+    pub fn fetch_xor(&self, value: T, order: Ordering) -> T {
+        self.fetch_op(
+            || {
+                let old = unsafe { *self.data.get() };
+                unsafe { *self.data.get() ^= value };
+                old
+            },
+            order,
+        )
+    }
+
+    pub fn into_inner(self) -> T {
+        unsafe { *self.data.get() }
+    }
+
+    pub fn load(&self, _order: Ordering) -> T {
+        self.read_lock();
+        let data = unsafe { *self.data.get() };
+        core::sync::atomic::fence(Ordering::SeqCst);
+        self.unlock();
+        data
+    }
+
+    pub fn store(&self, value: T, _order: Ordering) {
+        self.write_lock();
+        unsafe { *self.data.get() = value };
+        core::sync::atomic::fence(Ordering::SeqCst);
+        self.unlock();
+    }
+
+    pub fn swap(&self, value: T, _order: Ordering) -> T {
+        self.write_lock();
+        let data = unsafe { *self.data.get() };
+        unsafe { *self.data.get() = value };
+        core::sync::atomic::fence(Ordering::SeqCst);
+        self.unlock();
+        data
+    }
+}
diff --git a/iceoryx2-pal/concurrency-sync/src/lib.rs b/iceoryx2-pal/concurrency-sync/src/lib.rs
index 74b6a5bac..678744655 100644
--- a/iceoryx2-pal/concurrency-sync/src/lib.rs
+++ b/iceoryx2-pal/concurrency-sync/src/lib.rs
@@ -14,9 +14,9 @@
 
 const SPIN_REPETITIONS: u64 = 10000;
 
-pub mod atomic;
 pub mod barrier;
 pub mod condition_variable;
+pub mod ice_atomic;
 pub mod mutex;
 pub mod rwlock;
 pub mod semaphore;
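A minimal usage sketch for the new module (a standalone binary, assuming the crate is linked as iceoryx2_pal_concurrency_sync, the name used by the tests below): the IceAtomicU64 alias resolves to core::sync::atomic::AtomicU64 on 64-bit targets and to the lock-based IceAtomic<u64> on 32-bit targets, so call sites compile unchanged on both.

    use core::sync::atomic::Ordering;
    use iceoryx2_pal_concurrency_sync::ice_atomic::IceAtomicU64;

    fn main() {
        // Backed by the native AtomicU64 on 64-bit targets and by the
        // RwLockWriterPreference-based IceAtomic<u64> on 32-bit targets.
        let counter = IceAtomicU64::new(41);

        let old = counter.fetch_add(1, Ordering::Relaxed);

        assert_eq!(old, 41);
        assert_eq!(counter.load(Ordering::Relaxed), 42);
    }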
diff --git a/iceoryx2-pal/concurrency-sync/tests/ice_atomic_tests.rs b/iceoryx2-pal/concurrency-sync/tests/ice_atomic_tests.rs
new file mode 100644
index 000000000..ecbea44d7
--- /dev/null
+++ b/iceoryx2-pal/concurrency-sync/tests/ice_atomic_tests.rs
@@ -0,0 +1,294 @@
+// Copyright (c) 2023 Contributors to the Eclipse Foundation
+//
+// See the NOTICE file(s) distributed with this work for additional
+// information regarding copyright ownership.
+//
+// This program and the accompanying materials are made available under the
+// terms of the Apache Software License 2.0 which is available at
+// https://www.apache.org/licenses/LICENSE-2.0, or the MIT license
+// which is available at https://opensource.org/licenses/MIT.
+//
+// SPDX-License-Identifier: Apache-2.0 OR MIT
+
+use std::sync::atomic::AtomicU32;
+
+static COUNTER: AtomicU32 = AtomicU32::new(0);
+
+#[generic_tests::define]
+mod ice_atomic {
+    use super::*;
+
+    use iceoryx2_bb_testing::assert_that;
+    use iceoryx2_pal_concurrency_sync::ice_atomic::{internal::AtomicInteger, IceAtomic};
+    use std::{
+        fmt::Debug,
+        ops::{BitAnd, BitOr},
+        sync::atomic::Ordering,
+    };
+
+    trait Req: AtomicInteger + Debug + BitOr + BitAnd {
+        fn generate_value() -> Self;
+    }
+
+    impl Req for u64 {
+        fn generate_value() -> Self {
+            0x0000f0f0f0f0 + COUNTER.fetch_add(1, Ordering::Relaxed) as u64
+        }
+    }
+
+    impl Req for u128 {
+        fn generate_value() -> Self {
+            0x00000f0f0f0f0f0f0f0f0f0f + COUNTER.fetch_add(1, Ordering::Relaxed) as u128
+        }
+    }
+
+    impl Req for i64 {
+        fn generate_value() -> Self {
+            (0x0000abcdabcdabcd + COUNTER.fetch_add(1, Ordering::Relaxed) as i64)
+                * (-1 as i64).pow(COUNTER.load(Ordering::Relaxed))
+        }
+    }
+
+    impl Req for i128 {
+        fn generate_value() -> Self {
+            (0x0000abcdabcdabcddeadbeef + COUNTER.fetch_add(1, Ordering::Relaxed) as i128)
+                * (-1 as i128).pow(COUNTER.load(Ordering::Relaxed))
+        }
+    }
+
+    #[test]
+    fn new_works<T: Req>() {
+        let n = T::generate_value();
+        let sut = IceAtomic::<T>::new(n);
+
+        assert_that!(sut.load(Ordering::Relaxed), eq n);
+    }
+
+    #[test]
+    fn as_ptr_works<T: Req>() {
+        let n1 = T::generate_value();
+        let n2 = T::generate_value();
+
+        let sut = IceAtomic::<T>::new(n1);
+        let old_value = unsafe { *sut.as_ptr() };
+        unsafe { *sut.as_ptr() = n2 };
+
+        assert_that!(old_value, eq n1);
+        assert_that!(unsafe { *sut.as_ptr() }, eq n2);
+        assert_that!(sut.load(Ordering::Relaxed), eq n2);
+    }
+
+    #[test]
+    fn compare_exchange_success_works<T: Req>() {
+        let n_old = T::generate_value();
+        let n_new = T::generate_value();
+        let sut = IceAtomic::<T>::new(n_old);
+
+        let result = sut.compare_exchange(n_old, n_new, Ordering::Relaxed, Ordering::Relaxed);
+
+        assert_that!(result, is_ok);
+        assert_that!(result.unwrap(), eq n_old);
+    }
+
+    #[test]
+    fn compare_exchange_weak_success_works<T: Req>() {
+        let n_old = T::generate_value();
+        let n_new = T::generate_value();
+        let sut = IceAtomic::<T>::new(n_old);
+
+        let result = sut.compare_exchange_weak(n_old, n_new, Ordering::Relaxed, Ordering::Relaxed);
+
+        assert_that!(result, is_ok);
+        assert_that!(result.unwrap(), eq n_old);
+    }
+
+    #[test]
+    fn compare_exchange_failure_works<T: Req>() {
+        let n_old = T::generate_value();
+        let n_new = T::generate_value();
+        let sut = IceAtomic::<T>::new(n_old);
+
+        let result = sut.compare_exchange(n_new, n_old, Ordering::Relaxed, Ordering::Relaxed);
+
+        assert_that!(result, is_err);
+        assert_that!(result.err().unwrap(), eq n_old);
+    }
+
+    #[test]
+    fn compare_exchange_weak_failure_works<T: Req>() {
+        let n_old = T::generate_value();
+        let n_new = T::generate_value();
+        let sut = IceAtomic::<T>::new(n_old);
+
+        let result = sut.compare_exchange_weak(n_new, n_old, Ordering::Relaxed, Ordering::Relaxed);
+
+        assert_that!(result, is_err);
+        assert_that!(result.err().unwrap(), eq n_old);
+    }
+
+    #[test]
+    fn fetch_add_works<T: Req>() {
+        let n = T::generate_value();
+        let sut = IceAtomic::<T>::new(n);
+
+        let result = sut.fetch_add(n, Ordering::Relaxed);
+
+        assert_that!(result, eq n);
+        assert_that!(sut.load(Ordering::Relaxed), eq n.overflowing_add(n).0);
+    }
+
+    #[test]
+    fn fetch_and_works<T: Req>() {
+        let n1 = T::generate_value();
+        let n2 = T::generate_value();
+
+        let sut = IceAtomic::<T>::new(n1);
+
+        let result = sut.fetch_and(n2, Ordering::Relaxed);
+
+        assert_that!(result, eq n1);
+        let mut bit_and = n1;
+        bit_and &= n2;
+        assert_that!(sut.load(Ordering::Relaxed), eq bit_and);
+    }
+
+    #[test]
+    fn fetch_max_works<T: Req>() {
+        let n1 = T::generate_value();
+        let n2 = T::generate_value();
+
+        let sut = IceAtomic::<T>::new(n1);
+
+        let result = sut.fetch_max(n2, Ordering::Relaxed);
+
+        assert_that!(result, eq n1);
+        assert_that!(sut.load(Ordering::Relaxed), eq n1.max(n2));
+    }
+
+    #[test]
+    fn fetch_min_works<T: Req>() {
+        let n1 = T::generate_value();
+        let n2 = T::generate_value();
+
+        let sut = IceAtomic::<T>::new(n1);
+
+        let result = sut.fetch_min(n2, Ordering::Relaxed);
+
+        assert_that!(result, eq n1);
+        assert_that!(sut.load(Ordering::Relaxed), eq n1.min(n2));
+    }
+
+    #[test]
+    fn fetch_nand_works<T: Req>() {
+        let n1 = T::generate_value();
+        let n2 = T::generate_value();
+
+        let sut = IceAtomic::<T>::new(n1);
+
+        let result = sut.fetch_nand(n2, Ordering::Relaxed);
+
+        assert_that!(result, eq n1);
+        let mut bit_nand = n1;
+        bit_nand &= n2;
+        let bit_nand = !bit_nand;
+        assert_that!(sut.load(Ordering::Relaxed), eq bit_nand);
+    }
+
+    #[test]
+    fn fetch_or_works<T: Req>() {
+        let n1 = T::generate_value();
+        let n2 = T::generate_value();
+
+        let sut = IceAtomic::<T>::new(n1);
+
+        let result = sut.fetch_or(n2, Ordering::Relaxed);
+
+        assert_that!(result, eq n1);
+        let mut bit_or = n1;
+        bit_or |= n2;
+        assert_that!(sut.load(Ordering::Relaxed), eq bit_or);
+    }
+
+    #[test]
+    fn fetch_sub_works<T: Req>() {
+        let n1 = T::generate_value();
+        let n2 = T::generate_value();
+
+        let sut = IceAtomic::<T>::new(n1);
+
+        let result = sut.fetch_sub(n2, Ordering::Relaxed);
+
+        assert_that!(result, eq n1);
+        assert_that!(sut.load(Ordering::Relaxed), eq n1.overflowing_sub(n2).0);
+    }
+
+    #[test]
+    fn fetch_update_works<T: Req>() {
+        let n1 = T::generate_value();
+        let n2 = T::generate_value();
+
+        let sut = IceAtomic::<T>::new(n1);
+
+        let result = sut.fetch_update(n2, Ordering::Relaxed);
+
+        assert_that!(result, eq n1);
+        assert_that!(sut.load(Ordering::Relaxed), eq n2);
+    }
+
+    #[test]
+    fn fetch_xor_works<T: Req>() {
+        let n1 = T::generate_value();
+        let n2 = T::generate_value();
+
+        let sut = IceAtomic::<T>::new(n1);
+
+        let result = sut.fetch_xor(n2, Ordering::Relaxed);
+
+        assert_that!(result, eq n1);
+        let mut bit_xor = n1;
+        bit_xor ^= n2;
+        assert_that!(sut.load(Ordering::Relaxed), eq bit_xor);
+    }
+
+    #[test]
+    fn into_inner_works<T: Req>() {
+        let n = T::generate_value();
+        let sut = IceAtomic::<T>::new(n);
+
+        assert_that!(IceAtomic::<T>::into_inner(sut), eq n);
+    }
+
+    #[test]
+    fn load_store_works<T: Req>() {
+        let n1 = T::generate_value();
+        let n2 = T::generate_value();
+        let sut = IceAtomic::<T>::new(n1);
+
+        sut.store(n2, Ordering::Relaxed);
+
+        assert_that!(sut.load(Ordering::Relaxed), eq n2);
+    }
+
+    #[test]
+    fn swap_works<T: Req>() {
+        let n1 = T::generate_value();
+        let n2 = T::generate_value();
+        let sut = IceAtomic::<T>::new(n1);
+
+        let result = sut.swap(n2, Ordering::Relaxed);
+
+        assert_that!(result, eq n1);
+        assert_that!(sut.load(Ordering::Relaxed), eq n2);
+    }
+
+    #[instantiate_tests(<u64>)]
+    mod u64 {}
+
+    #[instantiate_tests(<u128>)]
+    mod u128 {}
+
+    #[instantiate_tests(<i64>)]
+    mod i64 {}
+
+    #[instantiate_tests(<i128>)]
+    mod i128 {}
+}
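For readers unfamiliar with the generic-tests crate wired up in the Cargo.toml change above: #[generic_tests::define] turns every generic fn test<T: ...>() in the module into a test template, and each #[instantiate_tests(<Type>)] submodule stamps out concrete #[test] functions for that type. A minimal self-contained sketch of the pattern (the trait bound and types here are placeholders, not taken from this PR):

    #[generic_tests::define]
    mod sketch {
        // The test body is written once against a bound...
        #[test]
        fn one_plus_one_is_two<T: From<u8> + PartialEq + std::fmt::Debug + std::ops::Add<Output = T>>() {
            assert_eq!(T::from(1) + T::from(1), T::from(2));
        }

        // ...and instantiated per concrete type; each submodule expands to
        // its own set of tests (sketch::u64::one_plus_one_is_two, ...).
        #[instantiate_tests(<u64>)]
        mod u64 {}

        #[instantiate_tests(<i32>)]
        mod i32 {}
    }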