From 87396e0bf606a5511cbcd8eb25706f294080e6bd Mon Sep 17 00:00:00 2001 From: Kevin Bracey Date: Tue, 22 Jan 2019 16:09:25 +0200 Subject: [PATCH 1/4] Assembler atomics Reimplement atomic code in inline assembly. This can improve optimisation, and avoids potential architectural problems with using LDREX/STREX intrinsics. API further extended: * Bitwise operations (fetch_and/fetch_or/fetch_xor) * fetch_add and fetch_sub (like incr/decr, but returning old value - aligning with C++11) * compare_exchange_weak * Explicit memory order specification * Basic freestanding template overloads for C++ This gives our existing C implementation essentially all the functionality needed by C++11. An actual Atomic template based upon these C functions could follow. --- .../lorawan/lorawanstack/unittest.cmake | 2 +- .../netsocket/DTLSSocket/unittest.cmake | 1 + .../DTLSSocketWrapper/unittest.cmake | 1 + .../netsocket/InternetSocket/unittest.cmake | 1 + .../netsocket/TCPServer/unittest.cmake | 1 + .../netsocket/TCPSocket/unittest.cmake | 1 + .../netsocket/TLSSocket/unittest.cmake | 1 + .../netsocket/TLSSocketWrapper/unittest.cmake | 1 + .../netsocket/UDPSocket/unittest.cmake | 1 + UNITTESTS/stubs/mbed_atomic_stub.c | 233 ++++ UNITTESTS/stubs/mbed_critical_stub.c | 123 -- .../COMPONENT_SPE/handles_manager.c | 2 +- .../COMPONENT_SPE/spm_common.c | 2 +- .../DataFlashBlockDevice.cpp | 2 +- .../FlashIAPBlockDevice.cpp | 2 +- .../wifi/esp8266-driver/ESP8266Interface.cpp | 2 +- features/lorawan/LoRaWANStack.h | 2 +- features/netsocket/InternetSocket.cpp | 1 + features/netsocket/InternetSocket.h | 2 +- .../blockdevice/BufferedBlockDevice.cpp | 2 +- .../blockdevice/ChainingBlockDevice.cpp | 2 +- .../blockdevice/ExhaustibleBlockDevice.cpp | 2 +- .../blockdevice/FlashSimBlockDevice.cpp | 2 +- .../storage/blockdevice/HeapBlockDevice.cpp | 2 +- .../storage/blockdevice/MBRBlockDevice.cpp | 2 +- features/storage/nvstore/source/nvstore.cpp | 2 +- mbed.h | 1 + platform/CircularBuffer.h | 1 + platform/DeepSleepLock.h | 2 +- platform/SharedPtr.h | 2 +- platform/SingletonPtr.h | 2 +- platform/internal/mbed_atomic_impl.c | 169 +++ platform/internal/mbed_atomic_impl.h | 1200 +++++++++++++++++ platform/mbed_atomic.h | 981 ++++++++++++++ platform/mbed_critical.c | 429 ------ platform/mbed_critical.h | 652 +-------- platform/mbed_error.c | 1 + platform/mbed_retarget.cpp | 1 + platform/mbed_sleep_manager.c | 1 + .../TARGET_NRF5x/TARGET_NRF52/serial_api.c | 1 + .../TARGET_M2351/crypto/crypto-misc.cpp | 1 + .../TARGET_M480/crypto/crypto-misc.cpp | 1 + .../TARGET_NUC472/crypto/crypto-misc.cpp | 1 + .../TARGET_LPC55S69/flash_api.c | 1 + targets/TARGET_STM/trng_api.c | 2 +- usb/device/USBDevice/USBDevice.h | 1 + 46 files changed, 2623 insertions(+), 1222 deletions(-) create mode 100644 UNITTESTS/stubs/mbed_atomic_stub.c create mode 100644 platform/internal/mbed_atomic_impl.c create mode 100644 platform/internal/mbed_atomic_impl.h create mode 100644 platform/mbed_atomic.h diff --git a/UNITTESTS/features/lorawan/lorawanstack/unittest.cmake b/UNITTESTS/features/lorawan/lorawanstack/unittest.cmake index 044bc7b152e..3b6c2b1d28c 100644 --- a/UNITTESTS/features/lorawan/lorawanstack/unittest.cmake +++ b/UNITTESTS/features/lorawan/lorawanstack/unittest.cmake @@ -35,7 +35,7 @@ set(unittest-test-sources stubs/LoRaPHY_stub.cpp stubs/LoRaMac_stub.cpp stubs/mbed_assert_stub.c - stubs/mbed_critical_stub.c + stubs/mbed_atomic_stub.c stubs/LoRaMacCrypto_stub.cpp stubs/LoRaMacChannelPlan_stub.cpp stubs/LoRaWANTimer_stub.cpp diff --git 
a/UNITTESTS/features/netsocket/DTLSSocket/unittest.cmake b/UNITTESTS/features/netsocket/DTLSSocket/unittest.cmake index d049b8632e9..8b66364f37b 100644 --- a/UNITTESTS/features/netsocket/DTLSSocket/unittest.cmake +++ b/UNITTESTS/features/netsocket/DTLSSocket/unittest.cmake @@ -22,6 +22,7 @@ set(unittest-test-sources features/netsocket/DTLSSocket/test_DTLSSocket.cpp stubs/Mutex_stub.cpp stubs/mbed_assert_stub.c + stubs/mbed_atomic_stub.c stubs/mbed_critical_stub.c stubs/equeue_stub.c ../features/nanostack/coap-service/test/coap-service/unittest/stub/mbedtls_stub.c diff --git a/UNITTESTS/features/netsocket/DTLSSocketWrapper/unittest.cmake b/UNITTESTS/features/netsocket/DTLSSocketWrapper/unittest.cmake index e0bea911abb..5fc1e82ac17 100644 --- a/UNITTESTS/features/netsocket/DTLSSocketWrapper/unittest.cmake +++ b/UNITTESTS/features/netsocket/DTLSSocketWrapper/unittest.cmake @@ -21,6 +21,7 @@ set(unittest-test-sources features/netsocket/DTLSSocketWrapper/test_DTLSSocketWrapper.cpp stubs/Mutex_stub.cpp stubs/mbed_assert_stub.c + stubs/mbed_atomic_stub.c stubs/mbed_critical_stub.c stubs/equeue_stub.c ../features/nanostack/coap-service/test/coap-service/unittest/stub/mbedtls_stub.c diff --git a/UNITTESTS/features/netsocket/InternetSocket/unittest.cmake b/UNITTESTS/features/netsocket/InternetSocket/unittest.cmake index 7811f3ad15a..39575a6a104 100644 --- a/UNITTESTS/features/netsocket/InternetSocket/unittest.cmake +++ b/UNITTESTS/features/netsocket/InternetSocket/unittest.cmake @@ -18,6 +18,7 @@ set(unittest-test-sources features/netsocket/InternetSocket/test_InternetSocket.cpp stubs/Mutex_stub.cpp stubs/mbed_assert_stub.c + stubs/mbed_atomic_stub.c stubs/mbed_critical_stub.c stubs/equeue_stub.c stubs/EventQueue_stub.cpp diff --git a/UNITTESTS/features/netsocket/TCPServer/unittest.cmake b/UNITTESTS/features/netsocket/TCPServer/unittest.cmake index 7e81e49f293..e8c4572badf 100644 --- a/UNITTESTS/features/netsocket/TCPServer/unittest.cmake +++ b/UNITTESTS/features/netsocket/TCPServer/unittest.cmake @@ -22,6 +22,7 @@ set(unittest-sources set(unittest-test-sources stubs/Mutex_stub.cpp stubs/mbed_assert_stub.c + stubs/mbed_atomic_stub.c stubs/mbed_critical_stub.c stubs/equeue_stub.c stubs/EventQueue_stub.cpp diff --git a/UNITTESTS/features/netsocket/TCPSocket/unittest.cmake b/UNITTESTS/features/netsocket/TCPSocket/unittest.cmake index 21c5e0586b5..f21779b4059 100644 --- a/UNITTESTS/features/netsocket/TCPSocket/unittest.cmake +++ b/UNITTESTS/features/netsocket/TCPSocket/unittest.cmake @@ -19,6 +19,7 @@ set(unittest-test-sources features/netsocket/TCPSocket/test_TCPSocket.cpp stubs/Mutex_stub.cpp stubs/mbed_assert_stub.c + stubs/mbed_atomic_stub.c stubs/mbed_critical_stub.c stubs/equeue_stub.c stubs/EventQueue_stub.cpp diff --git a/UNITTESTS/features/netsocket/TLSSocket/unittest.cmake b/UNITTESTS/features/netsocket/TLSSocket/unittest.cmake index 43b03193f23..9e0b906fe8d 100644 --- a/UNITTESTS/features/netsocket/TLSSocket/unittest.cmake +++ b/UNITTESTS/features/netsocket/TLSSocket/unittest.cmake @@ -21,6 +21,7 @@ set(unittest-test-sources features/netsocket/TLSSocket/test_TLSSocket.cpp stubs/Mutex_stub.cpp stubs/mbed_assert_stub.c + stubs/mbed_atomic_stub.c stubs/mbed_critical_stub.c stubs/equeue_stub.c ../features/nanostack/coap-service/test/coap-service/unittest/stub/mbedtls_stub.c diff --git a/UNITTESTS/features/netsocket/TLSSocketWrapper/unittest.cmake b/UNITTESTS/features/netsocket/TLSSocketWrapper/unittest.cmake index 2a86d3f6a73..2ff13c2dc6a 100644 --- 
a/UNITTESTS/features/netsocket/TLSSocketWrapper/unittest.cmake +++ b/UNITTESTS/features/netsocket/TLSSocketWrapper/unittest.cmake @@ -20,6 +20,7 @@ set(unittest-test-sources features/netsocket/TLSSocketWrapper/test_TLSSocketWrapper.cpp stubs/Mutex_stub.cpp stubs/mbed_assert_stub.c + stubs/mbed_atomic_stub.c stubs/mbed_critical_stub.c stubs/equeue_stub.c ../features/nanostack/coap-service/test/coap-service/unittest/stub/mbedtls_stub.c diff --git a/UNITTESTS/features/netsocket/UDPSocket/unittest.cmake b/UNITTESTS/features/netsocket/UDPSocket/unittest.cmake index 3646338ca5e..99a5900b240 100644 --- a/UNITTESTS/features/netsocket/UDPSocket/unittest.cmake +++ b/UNITTESTS/features/netsocket/UDPSocket/unittest.cmake @@ -19,6 +19,7 @@ set(unittest-test-sources features/netsocket/UDPSocket/test_UDPSocket.cpp stubs/Mutex_stub.cpp stubs/mbed_assert_stub.c + stubs/mbed_atomic_stub.c stubs/mbed_critical_stub.c stubs/equeue_stub.c stubs/EventQueue_stub.cpp diff --git a/UNITTESTS/stubs/mbed_atomic_stub.c b/UNITTESTS/stubs/mbed_atomic_stub.c new file mode 100644 index 00000000000..819a6ed4d4e --- /dev/null +++ b/UNITTESTS/stubs/mbed_atomic_stub.c @@ -0,0 +1,233 @@ +/* + * Copyright (c) 2017, Arm Limited and affiliates. + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "platform/mbed_atomic.h" + +bool core_util_atomic_flag_test_and_set(volatile core_util_atomic_flag *flagPtr) +{ + return false; +} + +bool core_util_atomic_cas_u8(volatile uint8_t *ptr, uint8_t *expectedCurrentValue, uint8_t desiredValue) +{ + return false; +} + +bool core_util_atomic_cas_u16(volatile uint16_t *ptr, uint16_t *expectedCurrentValue, uint16_t desiredValue) +{ + return false; +} + + +bool core_util_atomic_cas_u32(volatile uint32_t *ptr, uint32_t *expectedCurrentValue, uint32_t desiredValue) +{ + return false; +} + + +uint8_t core_util_atomic_exchange_u8(volatile uint8_t *ptr, uint8_t desiredValue) +{ + return 0; +} + +uint16_t core_util_atomic_exchange_u16(volatile uint16_t *ptr, uint16_t desiredValue) +{ + return 0; +} + +uint32_t core_util_atomic_exchange_u32(volatile uint32_t *ptr, uint32_t desiredValue) +{ + return 0; +} + + +uint8_t core_util_atomic_incr_u8(volatile uint8_t *valuePtr, uint8_t delta) +{ + return 0; +} + +uint16_t core_util_atomic_incr_u16(volatile uint16_t *valuePtr, uint16_t delta) +{ + return 0; +} + +uint32_t core_util_atomic_incr_u32(volatile uint32_t *valuePtr, uint32_t delta) +{ + return 0; +} + + +uint8_t core_util_atomic_decr_u8(volatile uint8_t *valuePtr, uint8_t delta) +{ + return 0; +} + +uint16_t core_util_atomic_decr_u16(volatile uint16_t *valuePtr, uint16_t delta) +{ + return 0; +} + +uint32_t core_util_atomic_decr_u32(volatile uint32_t *valuePtr, uint32_t delta) +{ + return 0; +} + + +uint8_t core_util_atomic_fetch_add_u8(volatile uint8_t *valuePtr, uint8_t arg) +{ + return 0; +} + +uint16_t core_util_atomic_fetch_add_u16(volatile uint16_t *valuePtr, uint16_t arg) +{ + return 0; +} + +uint32_t core_util_atomic_fetch_add_u32(volatile uint32_t *valuePtr, uint32_t arg) +{ + return 0; +} + + +uint8_t core_util_atomic_fetch_sub_u8(volatile uint8_t *valuePtr, uint8_t arg) +{ + return 0; +} + +uint16_t core_util_atomic_fetch_sub_u16(volatile uint16_t *valuePtr, uint16_t arg) +{ + return 0; +} + +uint32_t core_util_atomic_fetch_sub_u32(volatile uint32_t *valuePtr, uint32_t arg) +{ + return 0; +} + + +uint8_t core_util_atomic_fetch_and_u8(volatile uint8_t *valuePtr, uint8_t arg) +{ + return 0; +} + +uint16_t core_util_atomic_fetch_and_u16(volatile uint16_t *valuePtr, uint16_t arg) +{ + return 0; +} + +uint32_t core_util_atomic_fetch_and_u32(volatile uint32_t *valuePtr, uint32_t arg) +{ + return 0; +} + + +uint8_t core_util_atomic_fetch_or_u8(volatile uint8_t *valuePtr, uint8_t arg) +{ + return 0; +} + +uint16_t core_util_atomic_fetch_or_u16(volatile uint16_t *valuePtr, uint16_t arg) +{ + return 0; +} + +uint32_t core_util_atomic_fetch_or_u32(volatile uint32_t *valuePtr, uint32_t arg) +{ + return 0; +} + + +uint8_t core_util_atomic_fetch_xor_u8(volatile uint8_t *valuePtr, uint8_t arg) +{ + return 0; +} + +uint16_t core_util_atomic_fetch_xor_u16(volatile uint16_t *valuePtr, uint16_t arg) +{ + return 0; +} + +uint32_t core_util_atomic_fetch_xor_u32(volatile uint32_t *valuePtr, uint32_t arg) +{ + return 0; +} + + +uint64_t core_util_atomic_load_u64(const volatile uint64_t *valuePtr) +{ + return 0; +} + +void core_util_atomic_store_u64(volatile uint64_t *valuePtr, uint64_t desiredValue) +{ +} + +uint64_t core_util_atomic_exchange_u64(volatile uint64_t *valuePtr, uint64_t desiredValue) +{ + return 0; +} + +bool core_util_atomic_cas_u64(volatile uint64_t *ptr, uint64_t *expectedCurrentValue, uint64_t desiredValue) +{ + return false; +} + +bool core_util_atomic_compare_exchange_weak_u64(volatile uint64_t *ptr, uint64_t *expectedCurrentValue, 
uint64_t desiredValue) +{ + return false; +} + +uint64_t core_util_atomic_incr_u64(volatile uint64_t *valuePtr, uint64_t delta) +{ + return 0; +} + +uint64_t core_util_atomic_decr_u64(volatile uint64_t *valuePtr, uint64_t delta) +{ + return 0; +} + +uint64_t core_util_atomic_fetch_add_u64(volatile uint64_t *valuePtr, uint64_t arg) +{ + return 0; +} + +uint64_t core_util_atomic_fetch_sub_u64(volatile uint64_t *valuePtr, uint64_t arg) +{ + return 0; +} + +uint64_t core_util_atomic_fetch_and_u64(volatile uint64_t *valuePtr, uint64_t arg) +{ + return 0; +} + +uint64_t core_util_atomic_fetch_or_u64(volatile uint64_t *valuePtr, uint64_t arg) +{ + return 0; +} + +uint64_t core_util_atomic_fetch_xor_u64(volatile uint64_t *valuePtr, uint64_t arg) +{ + return 0; +} + +/* Similar functions for s32 etc are static inline, but these are extern inline for legacy binary compatibility */ +extern inline void *core_util_atomic_exchange_ptr(void *volatile *valuePtr, void *desiredValue); +extern inline void *core_util_atomic_incr_ptr(void *volatile *valuePtr, ptrdiff_t delta); +extern inline void *core_util_atomic_decr_ptr(void *volatile *valuePtr, ptrdiff_t delta); +extern inline bool core_util_atomic_cas_ptr(void *volatile *ptr, void **expectedCurrentValue, void *desiredValue); diff --git a/UNITTESTS/stubs/mbed_critical_stub.c b/UNITTESTS/stubs/mbed_critical_stub.c index b13bf61bff5..f86ccb82145 100644 --- a/UNITTESTS/stubs/mbed_critical_stub.c +++ b/UNITTESTS/stubs/mbed_critical_stub.c @@ -24,8 +24,6 @@ #include "platform/mbed_critical.h" #include "platform/mbed_toolchain.h" -static volatile uint32_t critical_section_reentrancy_counter = 0; - bool core_util_are_interrupts_enabled(void) { return false; @@ -48,124 +46,3 @@ void core_util_critical_section_enter(void) void core_util_critical_section_exit(void) { } - -bool core_util_atomic_flag_test_and_set(volatile core_util_atomic_flag *flagPtr) -{ - return false; -} - -bool core_util_atomic_cas_u8(volatile uint8_t *ptr, uint8_t *expectedCurrentValue, uint8_t desiredValue) -{ - return false; -} - -bool core_util_atomic_cas_u16(volatile uint16_t *ptr, uint16_t *expectedCurrentValue, uint16_t desiredValue) -{ - return false; -} - - -bool core_util_atomic_cas_u32(volatile uint32_t *ptr, uint32_t *expectedCurrentValue, uint32_t desiredValue) -{ - return false; -} - - -uint8_t core_util_atomic_exchange_u8(volatile uint8_t *ptr, uint8_t desiredValue) -{ - return 0; -} - -uint16_t core_util_atomic_exchange_u16(volatile uint16_t *ptr, uint16_t desiredValue) -{ - return 0; -} - -uint32_t core_util_atomic_exchange_u32(volatile uint32_t *ptr, uint32_t desiredValue) -{ - return 0; -} - - -uint8_t core_util_atomic_incr_u8(volatile uint8_t *valuePtr, uint8_t delta) -{ - return 0; -} - -uint16_t core_util_atomic_incr_u16(volatile uint16_t *valuePtr, uint16_t delta) -{ - return 0; -} - -uint32_t core_util_atomic_incr_u32(volatile uint32_t *valuePtr, uint32_t delta) -{ - return 0; -} - - -uint8_t core_util_atomic_decr_u8(volatile uint8_t *valuePtr, uint8_t delta) -{ - return 0; -} - -uint16_t core_util_atomic_decr_u16(volatile uint16_t *valuePtr, uint16_t delta) -{ - return 0; -} - -uint32_t core_util_atomic_decr_u32(volatile uint32_t *valuePtr, uint32_t delta) -{ - return 0; -} - - -uint64_t core_util_atomic_load_u64(const volatile uint64_t *valuePtr) -{ - return 0; -} - -void core_util_atomic_store_u64(volatile uint64_t *valuePtr, uint64_t desiredValue) -{ -} - -uint64_t core_util_atomic_exchange_u64(volatile uint64_t *valuePtr, uint64_t desiredValue) -{ - return 0; -} - 
-bool core_util_atomic_cas_u64(volatile uint64_t *ptr, uint64_t *expectedCurrentValue, uint64_t desiredValue) -{ - return false; -} - -uint64_t core_util_atomic_incr_u64(volatile uint64_t *valuePtr, uint64_t delta) -{ - return 0; -} - -uint64_t core_util_atomic_decr_u64(volatile uint64_t *valuePtr, uint64_t delta) -{ - return 0; -} - - -bool core_util_atomic_cas_ptr(void *volatile *ptr, void **expectedCurrentValue, void *desiredValue) -{ - return false; -} - -void *core_util_atomic_exchange_ptr(void *volatile *valuePtr, void *desiredValue) -{ - return NULL; -} - -void *core_util_atomic_incr_ptr(void *volatile *valuePtr, ptrdiff_t delta) -{ - return NULL; -} - -void *core_util_atomic_decr_ptr(void *volatile *valuePtr, ptrdiff_t delta) -{ - return NULL; -} - diff --git a/components/TARGET_PSA/TARGET_MBED_SPM/COMPONENT_SPE/handles_manager.c b/components/TARGET_PSA/TARGET_MBED_SPM/COMPONENT_SPE/handles_manager.c index c4a5778d5bd..dbea8607b90 100644 --- a/components/TARGET_PSA/TARGET_MBED_SPM/COMPONENT_SPE/handles_manager.c +++ b/components/TARGET_PSA/TARGET_MBED_SPM/COMPONENT_SPE/handles_manager.c @@ -19,7 +19,7 @@ #include "psa_defs.h" #include "cmsis_os2.h" -#include "mbed_critical.h" +#include "mbed_atomic.h" #include "spm_internal.h" #include "spm_panic.h" #include "handles_manager.h" diff --git a/components/TARGET_PSA/TARGET_MBED_SPM/COMPONENT_SPE/spm_common.c b/components/TARGET_PSA/TARGET_MBED_SPM/COMPONENT_SPE/spm_common.c index f98a0ba2bbf..94e070fa032 100644 --- a/components/TARGET_PSA/TARGET_MBED_SPM/COMPONENT_SPE/spm_common.c +++ b/components/TARGET_PSA/TARGET_MBED_SPM/COMPONENT_SPE/spm_common.c @@ -16,7 +16,7 @@ */ #include "cmsis_os2.h" -#include "mbed_critical.h" +#include "mbed_atomic.h" #include "psa_defs.h" #include "spm_internal.h" #include "spm_panic.h" diff --git a/components/storage/blockdevice/COMPONENT_DATAFLASH/DataFlashBlockDevice.cpp b/components/storage/blockdevice/COMPONENT_DATAFLASH/DataFlashBlockDevice.cpp index 70cdc4429d4..d6039be4cd0 100644 --- a/components/storage/blockdevice/COMPONENT_DATAFLASH/DataFlashBlockDevice.cpp +++ b/components/storage/blockdevice/COMPONENT_DATAFLASH/DataFlashBlockDevice.cpp @@ -15,7 +15,7 @@ */ #include "DataFlashBlockDevice.h" -#include "mbed_critical.h" +#include "mbed_atomic.h" #include diff --git a/components/storage/blockdevice/COMPONENT_FLASHIAP/FlashIAPBlockDevice.cpp b/components/storage/blockdevice/COMPONENT_FLASHIAP/FlashIAPBlockDevice.cpp index 99f5b678ed4..86c8a0bfe7f 100644 --- a/components/storage/blockdevice/COMPONENT_FLASHIAP/FlashIAPBlockDevice.cpp +++ b/components/storage/blockdevice/COMPONENT_FLASHIAP/FlashIAPBlockDevice.cpp @@ -17,7 +17,7 @@ #if DEVICE_FLASH #include "FlashIAPBlockDevice.h" -#include "mbed_critical.h" +#include "mbed_atomic.h" #include "mbed_error.h" using namespace mbed; diff --git a/components/wifi/esp8266-driver/ESP8266Interface.cpp b/components/wifi/esp8266-driver/ESP8266Interface.cpp index 9fc7fe10432..f690cb13047 100644 --- a/components/wifi/esp8266-driver/ESP8266Interface.cpp +++ b/components/wifi/esp8266-driver/ESP8266Interface.cpp @@ -26,7 +26,7 @@ #include "features/netsocket/nsapi_types.h" #include "mbed_trace.h" #include "platform/Callback.h" -#include "platform/mbed_critical.h" +#include "platform/mbed_atomic.h" #include "platform/mbed_debug.h" #include "platform/mbed_wait_api.h" diff --git a/features/lorawan/LoRaWANStack.h b/features/lorawan/LoRaWANStack.h index 8d0d182f390..dbdcac01d8e 100644 --- a/features/lorawan/LoRaWANStack.h +++ b/features/lorawan/LoRaWANStack.h @@ -42,7 
+42,7 @@ #include #include "events/EventQueue.h" -#include "platform/mbed_critical.h" +#include "platform/mbed_atomic.h" #include "platform/Callback.h" #include "platform/NonCopyable.h" #include "platform/ScopedLock.h" diff --git a/features/netsocket/InternetSocket.cpp b/features/netsocket/InternetSocket.cpp index 4f08bc9dcbf..926ba8b335e 100644 --- a/features/netsocket/InternetSocket.cpp +++ b/features/netsocket/InternetSocket.cpp @@ -15,6 +15,7 @@ */ #include "InternetSocket.h" +#include "platform/mbed_critical.h" #include "platform/Callback.h" using namespace mbed; diff --git a/features/netsocket/InternetSocket.h b/features/netsocket/InternetSocket.h index ab41df9fbb5..6f1dc3e1730 100644 --- a/features/netsocket/InternetSocket.h +++ b/features/netsocket/InternetSocket.h @@ -25,7 +25,7 @@ #include "rtos/Mutex.h" #include "rtos/EventFlags.h" #include "Callback.h" -#include "mbed_critical.h" +#include "mbed_atomic.h" #include "mbed_toolchain.h" #include "SocketStats.h" diff --git a/features/storage/blockdevice/BufferedBlockDevice.cpp b/features/storage/blockdevice/BufferedBlockDevice.cpp index 23ec5db4c0d..4eeb7bf2e11 100644 --- a/features/storage/blockdevice/BufferedBlockDevice.cpp +++ b/features/storage/blockdevice/BufferedBlockDevice.cpp @@ -16,7 +16,7 @@ #include "BufferedBlockDevice.h" #include "platform/mbed_assert.h" -#include "platform/mbed_critical.h" +#include "platform/mbed_atomic.h" #include #include diff --git a/features/storage/blockdevice/ChainingBlockDevice.cpp b/features/storage/blockdevice/ChainingBlockDevice.cpp index c5e31b3aa22..fa9f0362794 100644 --- a/features/storage/blockdevice/ChainingBlockDevice.cpp +++ b/features/storage/blockdevice/ChainingBlockDevice.cpp @@ -15,7 +15,7 @@ */ #include "ChainingBlockDevice.h" -#include "platform/mbed_critical.h" +#include "platform/mbed_atomic.h" #include "platform/mbed_assert.h" namespace mbed { diff --git a/features/storage/blockdevice/ExhaustibleBlockDevice.cpp b/features/storage/blockdevice/ExhaustibleBlockDevice.cpp index a19d4bb093e..aeb5eb5849d 100644 --- a/features/storage/blockdevice/ExhaustibleBlockDevice.cpp +++ b/features/storage/blockdevice/ExhaustibleBlockDevice.cpp @@ -15,7 +15,7 @@ */ #include "ExhaustibleBlockDevice.h" -#include "platform/mbed_critical.h" +#include "platform/mbed_atomic.h" #include "platform/mbed_assert.h" namespace mbed { diff --git a/features/storage/blockdevice/FlashSimBlockDevice.cpp b/features/storage/blockdevice/FlashSimBlockDevice.cpp index 04b30a9a2a7..61591309ed3 100644 --- a/features/storage/blockdevice/FlashSimBlockDevice.cpp +++ b/features/storage/blockdevice/FlashSimBlockDevice.cpp @@ -16,7 +16,7 @@ #include "FlashSimBlockDevice.h" #include "platform/mbed_assert.h" -#include "platform/mbed_critical.h" +#include "platform/mbed_atomic.h" #include #include #include diff --git a/features/storage/blockdevice/HeapBlockDevice.cpp b/features/storage/blockdevice/HeapBlockDevice.cpp index c3c682e49d2..8dddb7e6dc5 100644 --- a/features/storage/blockdevice/HeapBlockDevice.cpp +++ b/features/storage/blockdevice/HeapBlockDevice.cpp @@ -15,7 +15,7 @@ */ #include "HeapBlockDevice.h" -#include "platform/mbed_critical.h" +#include "platform/mbed_atomic.h" #include #include diff --git a/features/storage/blockdevice/MBRBlockDevice.cpp b/features/storage/blockdevice/MBRBlockDevice.cpp index cf4db6a5da1..1e65305ab4b 100644 --- a/features/storage/blockdevice/MBRBlockDevice.cpp +++ b/features/storage/blockdevice/MBRBlockDevice.cpp @@ -15,7 +15,7 @@ */ #include "MBRBlockDevice.h" -#include 
"platform/mbed_critical.h" +#include "platform/mbed_atomic.h" #include "platform/mbed_toolchain.h" #include "platform/mbed_assert.h" #include diff --git a/features/storage/nvstore/source/nvstore.cpp b/features/storage/nvstore/source/nvstore.cpp index 41b2603dcd9..a503ffdeb28 100644 --- a/features/storage/nvstore/source/nvstore.cpp +++ b/features/storage/nvstore/source/nvstore.cpp @@ -22,7 +22,7 @@ #include "FlashIAP.h" #include "SystemStorage.h" -#include "mbed_critical.h" +#include "mbed_atomic.h" #include "mbed_assert.h" #include "mbed_error.h" #include "mbed_wait_api.h" diff --git a/mbed.h b/mbed.h index e247e1dfd33..b66b6b77e12 100644 --- a/mbed.h +++ b/mbed.h @@ -86,6 +86,7 @@ #include "drivers/InterruptIn.h" #include "platform/mbed_wait_api.h" #include "hal/sleep_api.h" +#include "platform/mbed_atomic.h" #include "platform/mbed_power_mgmt.h" #include "platform/mbed_rtc_time.h" #include "platform/mbed_poll.h" diff --git a/platform/CircularBuffer.h b/platform/CircularBuffer.h index d1b15e7d5e7..77e8bb400d5 100644 --- a/platform/CircularBuffer.h +++ b/platform/CircularBuffer.h @@ -17,6 +17,7 @@ #ifndef MBED_CIRCULARBUFFER_H #define MBED_CIRCULARBUFFER_H +#include #include "platform/mbed_critical.h" #include "platform/mbed_assert.h" diff --git a/platform/DeepSleepLock.h b/platform/DeepSleepLock.h index 37aa98376ee..1fe95db2650 100644 --- a/platform/DeepSleepLock.h +++ b/platform/DeepSleepLock.h @@ -19,7 +19,7 @@ #include #include "platform/mbed_power_mgmt.h" -#include "platform/mbed_critical.h" +#include "platform/mbed_atomic.h" namespace mbed { diff --git a/platform/SharedPtr.h b/platform/SharedPtr.h index 0a78931eda3..edaa8198b3e 100644 --- a/platform/SharedPtr.h +++ b/platform/SharedPtr.h @@ -23,7 +23,7 @@ #include #include -#include "platform/mbed_critical.h" +#include "platform/mbed_atomic.h" namespace mbed { diff --git a/platform/SingletonPtr.h b/platform/SingletonPtr.h index 5cb109ea49d..9d2cc669ce2 100644 --- a/platform/SingletonPtr.h +++ b/platform/SingletonPtr.h @@ -28,7 +28,7 @@ #include #include #include "platform/mbed_assert.h" -#include "platform/mbed_critical.h" +#include "platform/mbed_atomic.h" #ifdef MBED_CONF_RTOS_PRESENT #include "cmsis_os2.h" #endif diff --git a/platform/internal/mbed_atomic_impl.c b/platform/internal/mbed_atomic_impl.c new file mode 100644 index 00000000000..5087ae62f81 --- /dev/null +++ b/platform/internal/mbed_atomic_impl.c @@ -0,0 +1,169 @@ +/* + * Copyright (c) 2019, ARM Limited, All Rights Reserved + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "platform/mbed_assert.h" +#include "platform/mbed_atomic.h" +#include "platform/mbed_critical.h" + +/* Inline bool implementations in the header use uint8_t versions to manipulate the bool */ +MBED_STATIC_ASSERT(sizeof(bool) == sizeof(uint8_t), "Surely bool is a byte"); + +/* Inline implementations in the header use uint32_t versions to manipulate pointers */ +MBED_STATIC_ASSERT(sizeof(void *) == sizeof(uint32_t), "Alas, pointers must be 32-bit"); + + +#define DO_MBED_LOCKED_OP(name, OP, retValue, T, fn_suffix) \ +T core_util_atomic_##name##_##fn_suffix(volatile T *valuePtr, T arg) \ +{ \ + T oldValue, newValue; \ + core_util_critical_section_enter(); \ + oldValue = *valuePtr; \ + newValue = OP; \ + *valuePtr = newValue; \ + core_util_critical_section_exit(); \ + return retValue; \ +} + +#define DO_MBED_LOCKED_CAS_OP(T, fn_suffix) \ +bool core_util_atomic_cas_##fn_suffix(volatile T *ptr, T *expectedCurrentValue, T desiredValue) \ +{ \ + bool success; \ + T currentValue; \ + core_util_critical_section_enter(); \ + currentValue = *ptr; \ + if (currentValue == *expectedCurrentValue) { \ + *ptr = desiredValue; \ + success = true; \ + } else { \ + *expectedCurrentValue = currentValue; \ + success = false; \ + } \ + core_util_critical_section_exit(); \ + return success; \ +} \ + \ +bool core_util_atomic_compare_exchange_weak_##fn_suffix(volatile T *ptr, \ + T *expectedCurrentValue, T desiredValue) \ +{ \ + return core_util_atomic_cas_##fn_suffix(ptr, expectedCurrentValue, desiredValue); \ +} + +#if MBED_EXCLUSIVE_ACCESS +/* These are the C99 external definitions for the inline functions */ +/* We maintain external definitions rather than using "static inline" for backwards binary compatibility + * and to give the compiler plenty of leeway to choose to not inline in both C and C++ modes + */ + +extern inline bool core_util_atomic_flag_test_and_set(volatile core_util_atomic_flag *flagPtr); + +extern inline uint8_t core_util_atomic_exchange_u8(volatile uint8_t *valuePtr, uint8_t newValue); +extern inline uint16_t core_util_atomic_exchange_u16(volatile uint16_t *valuePtr, uint16_t newValue); +extern inline uint32_t core_util_atomic_exchange_u32(volatile uint32_t *valuePtr, uint32_t newValue); +extern inline uint8_t core_util_atomic_incr_u8(volatile uint8_t *valuePtr, uint8_t arg); +extern inline uint16_t core_util_atomic_incr_u16(volatile uint16_t *valuePtr, uint16_t arg); +extern inline uint32_t core_util_atomic_incr_u32(volatile uint32_t *valuePtr, uint32_t arg); +extern inline uint8_t core_util_atomic_decr_u8(volatile uint8_t *valuePtr, uint8_t arg); +extern inline uint16_t core_util_atomic_decr_u16(volatile uint16_t *valuePtr, uint16_t arg); +extern inline uint32_t core_util_atomic_decr_u32(volatile uint32_t *valuePtr, uint32_t arg); +extern inline uint8_t core_util_atomic_fetch_add_u8(volatile uint8_t *valuePtr, uint8_t arg); +extern inline uint16_t core_util_atomic_fetch_add_u16(volatile uint16_t *valuePtr, uint16_t arg); +extern inline uint32_t core_util_atomic_fetch_add_u32(volatile uint32_t *valuePtr, uint32_t arg); +extern inline uint8_t core_util_atomic_fetch_sub_u8(volatile uint8_t *valuePtr, uint8_t arg); +extern inline uint16_t core_util_atomic_fetch_sub_u16(volatile uint16_t *valuePtr, uint16_t arg); +extern inline uint32_t core_util_atomic_fetch_sub_u32(volatile uint32_t *valuePtr, uint32_t arg); +extern inline uint8_t core_util_atomic_fetch_and_u8(volatile uint8_t *valuePtr, uint8_t arg); +extern inline uint16_t core_util_atomic_fetch_and_u16(volatile uint16_t 
*valuePtr, uint16_t arg); +extern inline uint32_t core_util_atomic_fetch_and_u32(volatile uint32_t *valuePtr, uint32_t arg); +extern inline uint8_t core_util_atomic_fetch_or_u8(volatile uint8_t *valuePtr, uint8_t arg); +extern inline uint16_t core_util_atomic_fetch_or_u16(volatile uint16_t *valuePtr, uint16_t arg); +extern inline uint32_t core_util_atomic_fetch_or_u32(volatile uint32_t *valuePtr, uint32_t arg); +extern inline uint8_t core_util_atomic_fetch_xor_u8(volatile uint8_t *valuePtr, uint8_t arg); +extern inline uint16_t core_util_atomic_fetch_xor_u16(volatile uint16_t *valuePtr, uint16_t arg); +extern inline uint32_t core_util_atomic_fetch_xor_u32(volatile uint32_t *valuePtr, uint32_t arg); +extern inline bool core_util_atomic_cas_u8(volatile uint8_t *ptr, uint8_t *expectedCurrentValue, uint8_t desiredValue); +extern inline bool core_util_atomic_cas_u16(volatile uint16_t *ptr, uint16_t *expectedCurrentValue, uint16_t desiredValue); +extern inline bool core_util_atomic_cas_u32(volatile uint32_t *ptr, uint32_t *expectedCurrentValue, uint32_t desiredValue); +extern inline bool core_util_atomic_compare_exchange_weak_u8(volatile uint8_t *ptr, uint8_t *expectedCurrentValue, uint8_t desiredValue); +extern inline bool core_util_atomic_compare_exchange_weak_u16(volatile uint16_t *ptr, uint16_t *expectedCurrentValue, uint16_t desiredValue); +extern inline bool core_util_atomic_compare_exchange_weak_u32(volatile uint32_t *ptr, uint32_t *expectedCurrentValue, uint32_t desiredValue); + +#else + +bool core_util_atomic_flag_test_and_set(volatile core_util_atomic_flag *flagPtr) +{ + core_util_critical_section_enter(); + uint8_t currentValue = flagPtr->_flag; + flagPtr->_flag = true; + core_util_critical_section_exit(); + return currentValue; +} +#endif + +/* No architecture we support has LDREXD/STREXD, so must always disable IRQs for 64-bit operations */ +uint64_t core_util_atomic_load_u64(const volatile uint64_t *valuePtr) +{ + core_util_critical_section_enter(); + uint64_t currentValue = *valuePtr; + core_util_critical_section_exit(); + return currentValue; +} + +void core_util_atomic_store_u64(volatile uint64_t *valuePtr, uint64_t desiredValue) +{ + core_util_critical_section_enter(); + *valuePtr = desiredValue; + core_util_critical_section_exit(); +} + +/* Now locked operations for whichever we don't have lock-free ones for */ +#if MBED_EXCLUSIVE_ACCESS +/* Just need 64-bit locked operations */ +#define DO_MBED_LOCKED_OPS(name, OP, retValue) \ + DO_MBED_LOCKED_OP(name, OP, retValue, uint64_t, u64) +#define DO_MBED_LOCKED_CAS_OPS() \ + DO_MBED_LOCKED_CAS_OP(uint64_t, u64) +#else +/* All the operations are locked */ +#define DO_MBED_LOCKED_OPS(name, OP, retValue) \ + DO_MBED_LOCKED_OP(name, OP, retValue, uint8_t, u8) \ + DO_MBED_LOCKED_OP(name, OP, retValue, uint16_t, u16) \ + DO_MBED_LOCKED_OP(name, OP, retValue, uint32_t, u32) \ + DO_MBED_LOCKED_OP(name, OP, retValue, uint64_t, u64) +#define DO_MBED_LOCKED_CAS_OPS() \ + DO_MBED_LOCKED_CAS_OP(uint8_t, u8) \ + DO_MBED_LOCKED_CAS_OP(uint16_t, u16) \ + DO_MBED_LOCKED_CAS_OP(uint32_t, u32) \ + DO_MBED_LOCKED_CAS_OP(uint64_t, u64) +#endif + +// *INDENT-OFF* +DO_MBED_LOCKED_OPS(exchange, arg, oldValue) +DO_MBED_LOCKED_OPS(incr, oldValue + arg, newValue) +DO_MBED_LOCKED_OPS(decr, oldValue - arg, newValue) +DO_MBED_LOCKED_OPS(fetch_add, oldValue + arg, oldValue) +DO_MBED_LOCKED_OPS(fetch_sub, oldValue - arg, oldValue) +DO_MBED_LOCKED_OPS(fetch_and, oldValue & arg, oldValue) +DO_MBED_LOCKED_OPS(fetch_or, oldValue | arg, oldValue) 
+DO_MBED_LOCKED_OPS(fetch_xor, oldValue ^ arg, oldValue) +DO_MBED_LOCKED_CAS_OPS() +// *INDENT-ON* + +/* Similar functions for s32 etc are static inline, but these are extern inline for legacy binary compatibility */ +extern inline void *core_util_atomic_exchange_ptr(void *volatile *valuePtr, void *desiredValue); +extern inline void *core_util_atomic_incr_ptr(void *volatile *valuePtr, ptrdiff_t delta); +extern inline void *core_util_atomic_decr_ptr(void *volatile *valuePtr, ptrdiff_t delta); +extern inline bool core_util_atomic_cas_ptr(void *volatile *ptr, void **expectedCurrentValue, void *desiredValue); diff --git a/platform/internal/mbed_atomic_impl.h b/platform/internal/mbed_atomic_impl.h new file mode 100644 index 00000000000..cbabd4b708d --- /dev/null +++ b/platform/internal/mbed_atomic_impl.h @@ -0,0 +1,1200 @@ + +/* + * Copyright (c) 2015-2016, ARM Limited, All Rights Reserved + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef __MBED_ATOMIC_IMPL_H__ +#define __MBED_ATOMIC_IMPL_H__ + +#ifndef __MBED_UTIL_ATOMIC_H__ +#error "mbed_atomic_impl.h is designed to be included only by mbed_atomic.h" +#endif + +#include +#include "cmsis.h" +#include "platform/mbed_assert.h" +#include "platform/mbed_toolchain.h" + +#ifdef __cplusplus +extern "C" { +#endif + +#ifdef MBED_DEBUG +/* Plain loads must not have "release" or "acquire+release" order */ +#define MBED_CHECK_LOAD_ORDER(order) MBED_ASSERT((order) != mbed_memory_order_release && (order) != mbed_memory_order_acq_rel) + +/* Plain stores must not have "consume", "acquire" or "acquire+release" order */ +#define MBED_CHECK_STORE_ORDER(order) MBED_ASSERT((order) != mbed_memory_order_consume && (order) != mbed_memory_order_acquire && (order) != mbed_memory_order_acq_rel) + +/* Compare exchange needs failure order no stronger than success, and failure can't be "release" or "acquire+release" */ +#define MBED_CHECK_CAS_ORDER(success, failure) \ + MBED_ASSERT((failure) <= (success) && (failure) != mbed_memory_order_release && (failure) != mbed_memory_order_acq_rel) +#else +#define MBED_CHECK_LOAD_ORDER(order) (void)0 +#define MBED_CHECK_STORE_ORDER(order) (void)0 +#define MBED_CHECK_CAS_ORDER(success, failure) (void)0 +#endif + +/* This is currently just to silence unit tests, so no better test required */ +#ifdef __MBED__ +#define MBED_ATOMIC_PTR_SIZE 32 +#else +#define MBED_ATOMIC_PTR_SIZE 64 +#endif + +/* Place barrier after a load or read-modify-write if a consume or acquire operation */ +#define MBED_ACQUIRE_BARRIER(order) do { \ + if ((order) & (mbed_memory_order_consume|mbed_memory_order_acquire)) { \ + MBED_BARRIER(); \ + } } while (0) + +/* Place barrier before a store or read-modify-write if a release operation */ +#define MBED_RELEASE_BARRIER(order) do { \ + if ((order) & mbed_memory_order_release) { \ + MBED_BARRIER(); \ + } } while (0) + +/* Place barrier after a plain store if a sequentially consistent */ +#define MBED_SEQ_CST_BARRIER(order) do { \ + if ((order) == 
mbed_memory_order_seq_cst) { \ + MBED_BARRIER(); \ + } } while (0) + + + +#if MBED_EXCLUSIVE_ACCESS + +/* This header file provides C inline definitions for atomic functions. */ +/* For C99 inline semantic compatibility, mbed_atomic_impl.c has out-of-line definitions. */ + +/****************************** ASSEMBLER **********************************/ + +// Fiddle about with constraints. These work for GCC and clang, but +// IAR appears to be restricted to having only a single constraint, +// so we can't do immediates. +#if MBED_EXCLUSIVE_ACCESS_THUMB1 +#define MBED_DOP_REG "l" // Need low register to get 16-bit 3-op ADD/SUB +#define MBED_CMP_IMM "I" // CMP 8-bit immediate +#define MBED_SUB3_IMM "L" // -7 to +7 +#else +#define MBED_DOP_REG "r" // Can use 32-bit 3-op ADD/SUB, so any registers +#define MBED_CMP_IMM "IL" // CMP or CMN, 12-bit immediate +#define MBED_SUB3_IMM "IL" // SUB or ADD, 12-bit immediate +#endif + +// ARM C 5 inline assembler recommends against using LDREX/STREX +// for same reason as intrinsics, but there's no other way to get +// inlining. ARM C 5 is being retired anyway. + +#ifdef __CC_ARM +#define DO_MBED_LOCKFREE_EXCHG_ASM(M) \ + __asm { \ + LDREX##M oldValue, [valuePtr] \ + STREX##M fail, newValue, [valuePtr] \ + } +#elif defined __clang__ || defined __GNUC__ +#define DO_MBED_LOCKFREE_EXCHG_ASM(M) \ + __asm volatile ( \ + "LDREX"#M "\t%[oldValue], %[value]\n\t" \ + "STREX"#M "\t%[fail], %[newValue], %[value]\n\t" \ + : [oldValue] "=&r" (oldValue), \ + [fail] "=&r" (fail), \ + [value] "+Q" (*valuePtr) \ + : [newValue] "r" (newValue) \ + : \ + ) +#elif defined __ICCARM__ +/* In IAR "r" means low register if Thumbv1 (there's no way to specify any register...) */ +#define DO_MBED_LOCKFREE_EXCHG_ASM(M) \ + asm volatile ( \ + "LDREX"#M "\t%[oldValue], [%[valuePtr]]\n" \ + "STREX"#M "\t%[fail], %[newValue], [%[valuePtr]]\n" \ + : [oldValue] "=&r" (oldValue), \ + [fail] "=&r" (fail) \ + : [valuePtr] "r" (valuePtr), \ + [newValue] "r" (newValue) \ + : "memory" \ + ) +#endif + +#ifdef __CC_ARM +#define DO_MBED_LOCKFREE_3OP_ASM(OP, Constants, M) \ + __asm { \ + LDREX##M oldValue, [valuePtr] \ + OP newValue, oldValue, arg \ + STREX##M fail, newValue, [valuePtr] \ + } +#elif defined __clang__ || defined __GNUC__ +#define DO_MBED_LOCKFREE_3OP_ASM(OP, Constants, M) \ + __asm volatile ( \ + "LDREX"#M "\t%[oldValue], %[value]\n\t" \ + #OP "\t%[newValue], %[oldValue], %[arg]\n\t" \ + "STREX"#M "\t%[fail], %[newValue], %[value]\n\t" \ + : [oldValue] "=&" MBED_DOP_REG (oldValue), \ + [newValue] "=&" MBED_DOP_REG (newValue), \ + [fail] "=&r" (fail), \ + [value] "+Q" (*valuePtr) \ + : [arg] Constants MBED_DOP_REG (arg) \ + : "cc" \ + ) +#elif defined __ICCARM__ +/* In IAR "r" means low register if Thumbv1 (there's no way to specify any register...) */ +#define DO_MBED_LOCKFREE_3OP_ASM(OP, Constants, M) \ + asm volatile ( \ + "LDREX"#M "\t%[oldValue], [%[valuePtr]]\n" \ + #OP "\t%[newValue], %[oldValue], %[arg]\n" \ + "STREX"#M "\t%[fail], %[newValue], [%[valuePtr]]\n" \ + : [oldValue] "=&r" (oldValue), \ + [newValue] "=&r" (newValue), \ + [fail] "=&r" (fail) \ + : [valuePtr] "r" (valuePtr), \ + [arg] "r" (arg) \ + : "memory", "cc" \ + ) +#endif + +/* Bitwise operations are harder to do in ARMv8-M baseline - there + * are only 2-operand versions of the instructions. 
+ */ +#ifdef __CC_ARM +#define DO_MBED_LOCKFREE_2OP_ASM(OP, Constants, M) \ + __asm { \ + LDREX##M oldValue, [valuePtr] \ + MOV newValue, oldValue \ + OP newValue, arg \ + STREX##M fail, newValue, [valuePtr] \ + } +#elif defined __clang__ || defined __GNUC__ +#define DO_MBED_LOCKFREE_2OP_ASM(OP, Constants, M) \ + __asm volatile ( \ + "LDREX"#M "\t%[oldValue], %[value]\n\t" \ + "MOV" "\t%[newValue], %[oldValue]\n\t" \ + #OP "\t%[newValue], %[arg]\n\t" \ + "STREX"#M "\t%[fail], %[newValue], %[value]\n\t" \ + : [oldValue] "=&r" (oldValue), \ + [newValue] "=&l" (newValue), \ + [fail] "=&r" (fail), \ + [value] "+Q" (*valuePtr) \ + : [arg] Constants "l" (arg) \ + : "cc" \ + ) +#elif defined __ICCARM__ +#define DO_MBED_LOCKFREE_2OP_ASM(OP, Constants, M) \ + asm volatile ( \ + "LDREX"#M "\t%[oldValue], [%[valuePtr]]\n" \ + "MOV" "\t%[newValue], %[oldValue]\n" \ + #OP "\t%[newValue], %[arg]\n" \ + "STREX"#M "\t%[fail], %[newValue], [%[valuePtr]]\n" \ + : [oldValue] "=&r" (oldValue), \ + [newValue] "=&r" (newValue), \ + [fail] "=&r" (fail) \ + : [valuePtr] "r" (valuePtr), \ + [arg] "r" (arg) \ + : "memory", "cc" \ + ) +#endif + +/* Note that we split ARM and Thumb implementations for CAS, as + * the key distinction is the handling of conditions. Thumb-2 IT is + * partially deprecated, so avoid it, making Thumb-1 and Thumb-2 + * implementations the same. + */ +#if MBED_EXCLUSIVE_ACCESS_ARM +#ifdef __CC_ARM +#define DO_MBED_LOCKFREE_CAS_WEAK_ASM(M) \ + __asm { \ + LDREX##M oldValue, [ptr] \ + SUBS fail, oldValue, expectedValue \ + STREX##M##EQ fail, desiredValue, [ptr] \ + } +#elif defined __clang__ || defined __GNUC__ +#define DO_MBED_LOCKFREE_CAS_WEAK_ASM(M) \ + __asm volatile ( \ + "LDREX"#M "\t%[oldValue], %[value]\n\t" \ + "SUBS" "\t%[fail], %[oldValue], %[expectedValue]\n\t"\ + "STREX"#M"EQ\t%[fail], %[desiredValue], %[value]\n\t" \ + : [oldValue] "=&r" (oldValue), \ + [fail] "=&r" (fail), \ + [value] "+Q" (*ptr) \ + : [desiredValue] "r" (desiredValue), \ + [expectedValue] "ILr" (expectedValue) \ + : "cc" \ + ) +#elif defined __ICCARM__ +#define DO_MBED_LOCKFREE_CAS_WEAK_ASM(M) \ + asm volatile ( \ + "LDREX"#M "\t%[oldValue], [%[valuePtr]]\n" \ + "SUBS" "\t%[fail], %[oldValue], %[expectedValue]\n" \ + "STREX"#M"EQ\t%[fail], %[desiredValue], [%[valuePtr]]\n"\ + : [oldValue] "=&r" (oldValue), \ + [fail] "=&r" (fail) \ + : [desiredValue] "r" (desiredValue), \ + [expectedValue] "r" (expectedValue), \ + [valuePtr] "r" (ptr), \ + : "memory", "cc" \ + ) +#endif +#else // MBED_EXCLUSIVE_ACCESS_ARM +#ifdef __CC_ARM +#define DO_MBED_LOCKFREE_CAS_WEAK_ASM(M) \ + __asm { \ + LDREX##M oldValue, [ptr] \ + SUBS fail, oldValue, expectedValue \ + BNE done \ + STREX##M fail, desiredValue, [ptr] \ +done: \ + } +#elif defined __clang__ || defined __GNUC__ +#define DO_MBED_LOCKFREE_CAS_WEAK_ASM(M) \ + __asm volatile ( \ + "LDREX"#M "\t%[oldValue], %[value]\n\t" \ + "SUBS" "\t%[fail], %[oldValue], %[expectedValue]\n\t"\ + "BNE" "\t%=f\n\t" \ + "STREX"#M "\t%[fail], %[desiredValue], %[value]\n" \ + "%=:" \ + : [oldValue] "=&" MBED_DOP_REG (oldValue), \ + [fail] "=&" MBED_DOP_REG (fail), \ + [value] "+Q" (*ptr) \ + : [desiredValue] "r" (desiredValue), \ + [expectedValue] MBED_SUB3_IMM MBED_DOP_REG (expectedValue) \ + : "cc" \ + ) +#elif defined __ICCARM__ +#define DO_MBED_LOCKFREE_CAS_WEAK_ASM(M) \ + asm volatile ( \ + "LDREX"#M "\t%[oldValue], [%[valuePtr]]\n" \ + "SUBS" "\t%[fail], %[oldValue], %[expectedValue]\n" \ + "BNE" "\tdone\n\t" \ + "STREX"#M "\t%[fail], %[desiredValue], [%[valuePtr]]\n"\ + "done:" \ + : 
[oldValue] "=&r" (oldValue), \ + [fail] "=&r" (fail) \ + : [desiredValue] "r" (desiredValue), \ + [expectedValue] "r" (expectedValue), \ + [valuePtr] "r" (ptr) \ + : "memory", "cc" \ + ) +#endif +#endif // MBED_EXCLUSIVE_ACCESS_ARM + +/* For strong CAS, conditional execution is complex enough to + * not be worthwhile, so all implementations look like Thumb-1. + * (This is the operation for which STREX returning 0 for success + * is beneficial.) + */ +#ifdef __CC_ARM +#define DO_MBED_LOCKFREE_CAS_STRONG_ASM(M) \ + __asm { \ + retry: \ + LDREX##M oldValue, [ptr] \ + SUBS fail, oldValue, expectedValue \ + BNE done \ + STREX##M fail, desiredValue, [ptr] \ + CMP fail, #0 \ + BNE retry \ + done: \ + } +#elif defined __clang__ || defined __GNUC__ +#define DO_MBED_LOCKFREE_CAS_STRONG_ASM(M) \ + __asm volatile ( \ + "\n%=:\n\t" \ + "LDREX"#M "\t%[oldValue], %[value]\n\t" \ + "SUBS" "\t%[fail], %[oldValue], %[expectedValue]\n\t"\ + "BNE" "\t%=f\n" \ + "STREX"#M "\t%[fail], %[desiredValue], %[value]\n\t" \ + "CMP" "\t%[fail], #0\n\t" \ + "BNE" "\t%=b\n" \ + "%=:" \ + : [oldValue] "=&" MBED_DOP_REG (oldValue), \ + [fail] "=&" MBED_DOP_REG (fail), \ + [value] "+Q" (*ptr) \ + : [desiredValue] "r" (desiredValue), \ + [expectedValue] MBED_SUB3_IMM MBED_DOP_REG (expectedValue) \ + : "cc" \ + ) +#elif defined __ICCARM__ +#define DO_MBED_LOCKFREE_CAS_STRONG_ASM(M) \ + asm volatile ( \ + "retry:\n" \ + "LDREX"#M "\t%[oldValue], [%[valuePtr]]\n" \ + "SUBS" "\t%[fail], %[oldValue], %[expectedValue]\n" \ + "BNE" "\tdone\n" \ + "STREX"#M "\t%[fail], %[desiredValue], [%[valuePtr]]\n"\ + "CMP" "\t%[fail], #0\n" \ + "BNE" "\tretry\n" \ + "done:" \ + : [oldValue] "=&r" (oldValue), \ + [fail] "=&r" (fail) \ + : [desiredValue] "r" (desiredValue), \ + [expectedValue] "r" (expectedValue), \ + [valuePtr] "r" (ptr) \ + : "memory", "cc" \ + ) +#endif + +/********************* LOCK-FREE IMPLEMENTATION MACROS ****************/ + +/* Note care taken with types here. Values which the assembler outputs correctly + * narrowed, or inputs without caring about width, are marked as type T. Other + * values are uint32_t. It's not clear from documentation whether assembler + * assumes anything about widths, but try to signal correctly to get necessary + * narrowing, and avoid unnecessary. + * Tests show that GCC in particular will pass in unnarrowed values - eg passing + * "uint8_t arg = -1" to the assembler as 0xFFFFFFFF. This is fine for, eg, add_u8, + * but wouldn't be for compare_and_exchange_u8. + * On the other hand, it seems to be impossible to stop GCC inserting narrowing + * instructions for the output - it will always put in UXTB for the oldValue of + * an operation. 
+ */ +#define DO_MBED_LOCKFREE_EXCHG_OP(T, fn_suffix, M) \ +inline T core_util_atomic_exchange_##fn_suffix(volatile T *valuePtr, T newValue) \ +{ \ + T oldValue; \ + uint32_t fail; \ + MBED_BARRIER(); \ + DO_MBED_LOCKFREE_EXCHG_ASM(M); \ + MBED_BARRIER(); \ + return oldValue; \ +} \ + \ +MBED_FORCEINLINE T core_util_atomic_exchange_explicit_##fn_suffix( \ + volatile T *valuePtr, T newValue, mbed_memory_order order) \ +{ \ + T oldValue; \ + uint32_t fail; \ + MBED_RELEASE_BARRIER(order); \ + DO_MBED_LOCKFREE_EXCHG_ASM(M); \ + MBED_ACQUIRE_BARRIER(order); \ + return oldValue; \ +} + +#define DO_MBED_LOCKFREE_CAS_WEAK_OP(T, fn_suffix, M) \ +inline bool core_util_atomic_compare_exchange_weak_##fn_suffix(volatile T *ptr, T *expectedCurrentValue, T desiredValue) \ +{ \ + MBED_BARRIER(); \ + T oldValue; \ + uint32_t fail, expectedValue = *expectedCurrentValue; \ + DO_MBED_LOCKFREE_CAS_WEAK_ASM(M); \ + if (fail) { \ + *expectedCurrentValue = oldValue; \ + } \ + MBED_BARRIER(); \ + return !fail; \ +} \ + \ +MBED_FORCEINLINE bool core_util_atomic_compare_exchange_weak_explicit_##fn_suffix(volatile T *ptr, T *expectedCurrentValue, T desiredValue, mbed_memory_order success, mbed_memory_order failure) \ +{ \ + MBED_CHECK_CAS_ORDER(success, failure); \ + MBED_RELEASE_BARRIER(success); \ + T oldValue; \ + uint32_t fail, expectedValue = *expectedCurrentValue; \ + DO_MBED_LOCKFREE_CAS_WEAK_ASM(M); \ + if (fail) { \ + *expectedCurrentValue = oldValue; \ + } \ + MBED_ACQUIRE_BARRIER(fail ? failure : success); \ + return !fail; \ +} + +#define DO_MBED_LOCKFREE_CAS_STRONG_OP(T, fn_suffix, M) \ +inline bool core_util_atomic_cas_##fn_suffix(volatile T *ptr, T *expectedCurrentValue, T desiredValue) \ +{ \ + MBED_BARRIER(); \ + T oldValue; \ + uint32_t fail, expectedValue = *expectedCurrentValue; \ + DO_MBED_LOCKFREE_CAS_STRONG_ASM(M); \ + if (fail) { \ + *expectedCurrentValue = oldValue; \ + } \ + MBED_BARRIER(); \ + return !fail; \ +} \ + \ +MBED_FORCEINLINE bool core_util_atomic_cas_explicit_##fn_suffix(volatile T *ptr, T *expectedCurrentValue, T desiredValue, mbed_memory_order success, mbed_memory_order failure) \ +{ \ + MBED_CHECK_CAS_ORDER(success, failure); \ + MBED_RELEASE_BARRIER(success); \ + T oldValue; \ + uint32_t fail, expectedValue = *expectedCurrentValue; \ + DO_MBED_LOCKFREE_CAS_STRONG_ASM(M); \ + if (fail) { \ + *expectedCurrentValue = oldValue; \ + } \ + MBED_ACQUIRE_BARRIER(fail ? 
failure : success); \ + return !fail; \ +} + + +#define DO_MBED_LOCKFREE_2OP(name, OP, Constants, retValue, T, fn_suffix, M) \ +inline T core_util_atomic_##name##_##fn_suffix(volatile T *valuePtr, T arg) \ +{ \ + T oldValue; \ + uint32_t fail, newValue; \ + MBED_BARRIER(); \ + do { \ + DO_MBED_LOCKFREE_2OP_ASM(OP, Constants, M); \ + } while (fail); \ + MBED_BARRIER(); \ + return (T) retValue; \ +} \ + \ +MBED_FORCEINLINE T core_util_atomic_##name##_explicit_##fn_suffix( \ + volatile T *valuePtr, T arg, mbed_memory_order order) \ +{ \ + T oldValue; \ + uint32_t fail, newValue; \ + MBED_RELEASE_BARRIER(order); \ + do { \ + DO_MBED_LOCKFREE_2OP_ASM(OP, Constants, M); \ + } while (fail); \ + MBED_ACQUIRE_BARRIER(order); \ + return (T) retValue; \ +} \ + +#define DO_MBED_LOCKFREE_3OP(name, OP, Constants, retValue, T, fn_suffix, M) \ +inline T core_util_atomic_##name##_##fn_suffix(volatile T *valuePtr, T arg) { \ + T oldValue; \ + uint32_t fail, newValue; \ + MBED_BARRIER(); \ + do { \ + DO_MBED_LOCKFREE_3OP_ASM(OP, Constants, M); \ + } while (fail); \ + MBED_BARRIER(); \ + return (T) retValue; \ +} \ + \ +MBED_FORCEINLINE T core_util_atomic_##name##_explicit_##fn_suffix( \ + volatile T *valuePtr, T arg, mbed_memory_order order) \ +{ \ + T oldValue; \ + uint32_t fail, newValue; \ + MBED_RELEASE_BARRIER(order); \ + do { \ + DO_MBED_LOCKFREE_3OP_ASM(OP, Constants, M); \ + } while (fail); \ + MBED_ACQUIRE_BARRIER(order); \ + return (T) retValue; \ +} \ + +inline bool core_util_atomic_flag_test_and_set(volatile core_util_atomic_flag *valuePtr) +{ + MBED_BARRIER(); + bool oldValue, newValue = true; + uint32_t fail; + do { + DO_MBED_LOCKFREE_EXCHG_ASM(B); + } while (fail); + MBED_BARRIER(); + return oldValue; +} + +MBED_FORCEINLINE bool core_util_atomic_flag_test_and_set_explicit(volatile core_util_atomic_flag *valuePtr, mbed_memory_order order) +{ + MBED_RELEASE_BARRIER(order); + bool oldValue, newValue = true; + uint32_t fail; + do { + DO_MBED_LOCKFREE_EXCHG_ASM(B); + } while (fail); + MBED_ACQUIRE_BARRIER(order); + return oldValue; +} + +/********************* LOCK-FREE IMPLEMENTATION DEFINITIONS ****************/ + +#define DO_MBED_LOCKFREE_EXCHG_OPS() \ + DO_MBED_LOCKFREE_EXCHG_OP(uint8_t, u8, B) \ + DO_MBED_LOCKFREE_EXCHG_OP(uint16_t, u16, H) \ + DO_MBED_LOCKFREE_EXCHG_OP(uint32_t, u32, ) + +#define DO_MBED_LOCKFREE_3OPS(name, OP, Constants, retValue) \ + DO_MBED_LOCKFREE_3OP(name, OP, Constants, retValue, uint8_t, u8, B) \ + DO_MBED_LOCKFREE_3OP(name, OP, Constants, retValue, uint16_t, u16, H) \ + DO_MBED_LOCKFREE_3OP(name, OP, Constants, retValue, uint32_t, u32, ) + +#define DO_MBED_LOCKFREE_2OPS(name, OP, Constants, retValue) \ + DO_MBED_LOCKFREE_2OP(name, OP, Constants, retValue, uint8_t, u8, B) \ + DO_MBED_LOCKFREE_2OP(name, OP, Constants, retValue, uint16_t, u16, H) \ + DO_MBED_LOCKFREE_2OP(name, OP, Constants, retValue, uint32_t, u32, ) + +#define DO_MBED_LOCKFREE_CAS_STRONG_OPS() \ + DO_MBED_LOCKFREE_CAS_STRONG_OP(uint8_t, u8, B) \ + DO_MBED_LOCKFREE_CAS_STRONG_OP(uint16_t, u16, H) \ + DO_MBED_LOCKFREE_CAS_STRONG_OP(uint32_t, u32, ) + +#define DO_MBED_LOCKFREE_CAS_WEAK_OPS() \ + DO_MBED_LOCKFREE_CAS_WEAK_OP(uint8_t, u8, B) \ + DO_MBED_LOCKFREE_CAS_WEAK_OP(uint16_t, u16, H) \ + DO_MBED_LOCKFREE_CAS_WEAK_OP(uint32_t, u32, ) + + +// We always use the "S" form of operations - avoids yet another +// possible unneeded distinction between Thumbv1 and Thumbv2, and +// may reduce code size by allowing 16-bit instructions. 
+#if !MBED_EXCLUSIVE_ACCESS_THUMB1 +// I constraint is 12-bit modified immediate constant +// L constraint is negated 12-bit modified immediate constant +// (relying on assembler to swap ADD/SUB) +// We could permit J (-4095 to +4095) if we used ADD/SUB +// instead of ADDS/SUBS, but then that would block generation +// of the 16-bit forms. Shame we can't specify "don't care" +// for the "S", or get the GNU multi-alternative to +// choose ADDS/ADD appropriately. +DO_MBED_LOCKFREE_3OPS(incr, ADDS, "IL", newValue) +DO_MBED_LOCKFREE_3OPS(decr, SUBS, "IL", newValue) + +DO_MBED_LOCKFREE_3OPS(fetch_add, ADDS, "IL", oldValue) +DO_MBED_LOCKFREE_3OPS(fetch_sub, SUBS, "IL", oldValue) +// K constraint is inverted 12-bit modified immediate constant +// (relying on assembler substituting BIC for AND) +DO_MBED_LOCKFREE_3OPS(fetch_and, ANDS, "IK", oldValue) +#if MBED_EXCLUSIVE_ACCESS_ARM +// ARM does not have ORN instruction, so take plain immediates. +DO_MBED_LOCKFREE_3OPS(fetch_or, ORRS, "I", oldValue) +#else +// Thumb-2 has ORN instruction, and assembler substitutes ORN for ORR. +DO_MBED_LOCKFREE_3OPS(fetch_or, ORRS, "IK", oldValue) +#endif +// I constraint is 12-bit modified immediate operand +DO_MBED_LOCKFREE_3OPS(fetch_xor, EORS, "I", oldValue) +#else // MBED_EXCLUSIVE_ACCESS_THUMB1 +// L constraint is -7 to +7, suitable for 3-op ADD/SUB +// (relying on assembler to swap ADD/SUB) +DO_MBED_LOCKFREE_3OPS(incr, ADDS, "L", newValue) +DO_MBED_LOCKFREE_3OPS(decr, SUBS, "L", newValue) +DO_MBED_LOCKFREE_3OPS(fetch_add, ADDS, "L", oldValue) +DO_MBED_LOCKFREE_3OPS(fetch_sub, SUBS, "L", oldValue) +DO_MBED_LOCKFREE_2OPS(fetch_and, ANDS, "", oldValue) +DO_MBED_LOCKFREE_2OPS(fetch_or, ORRS, "", oldValue) +DO_MBED_LOCKFREE_2OPS(fetch_xor, EORS, "", oldValue) +#endif + +DO_MBED_LOCKFREE_EXCHG_OPS() +DO_MBED_LOCKFREE_CAS_STRONG_OPS() +DO_MBED_LOCKFREE_CAS_WEAK_OPS() + +#define DO_MBED_LOCKED_FETCH_OP_ORDERINGS(name) \ + DO_MBED_LOCKED_FETCH_OP_ORDERING(name, uint64_t, u64) +#define DO_MBED_LOCKED_CAS_ORDERINGS(name) \ + DO_MBED_LOCKED_CAS_ORDERING(name, uint64_t, u64) +#else // MBED_EXCLUSIVE_ACCESS +/* All the operations are locked, so need no ordering parameters */ +#define DO_MBED_LOCKED_FETCH_OP_ORDERINGS(name) \ + DO_MBED_LOCKED_FETCH_OP_ORDERING(name, uint8_t, u8) \ + DO_MBED_LOCKED_FETCH_OP_ORDERING(name, uint16_t, u16) \ + DO_MBED_LOCKED_FETCH_OP_ORDERING(name, uint32_t, u32) \ + DO_MBED_LOCKED_FETCH_OP_ORDERING(name, uint64_t, u64) +#define DO_MBED_LOCKED_CAS_ORDERINGS(name) \ + DO_MBED_LOCKED_CAS_ORDERING(name, uint8_t, u8) \ + DO_MBED_LOCKED_CAS_ORDERING(name, uint16_t, u16) \ + DO_MBED_LOCKED_CAS_ORDERING(name, uint32_t, u32) \ + DO_MBED_LOCKED_CAS_ORDERING(name, uint64_t, u64) + +MBED_FORCEINLINE bool core_util_atomic_flag_test_and_set_explicit(volatile core_util_atomic_flag *valuePtr, mbed_memory_order order) +{ + return core_util_atomic_flag_test_and_set(valuePtr); +} +#endif // MBED_EXCLUSIVE_ACCESS + +/********************* OPERATIONS THAT ARE ALWAYS LOCK-FREE ****************/ + +/* Lock-free loads and stores don't need assembler - just aligned accesses */ +/* Silly ordering of `T volatile` is because T can be `void *` */ +#define DO_MBED_LOCKFREE_LOADSTORE(T, fn_suffix) \ +MBED_FORCEINLINE T core_util_atomic_load_##fn_suffix(T const volatile *valuePtr) \ +{ \ + T value = *valuePtr; \ + MBED_BARRIER(); \ + return value; \ +} \ + \ +MBED_FORCEINLINE T core_util_atomic_load_explicit_##fn_suffix(T const volatile *valuePtr, mbed_memory_order order) \ +{ \ + MBED_CHECK_LOAD_ORDER(order); \ + T value = 
*valuePtr; \ + MBED_ACQUIRE_BARRIER(order); \ + return value; \ +} \ + \ +MBED_FORCEINLINE void core_util_atomic_store_##fn_suffix(T volatile *valuePtr, T value) \ +{ \ + MBED_BARRIER(); \ + *valuePtr = value; \ + MBED_BARRIER(); \ +} \ + \ +MBED_FORCEINLINE void core_util_atomic_store_explicit_##fn_suffix(T volatile *valuePtr, T value, mbed_memory_order order) \ +{ \ + MBED_CHECK_STORE_ORDER(order); \ + MBED_RELEASE_BARRIER(order); \ + *valuePtr = value; \ + MBED_SEQ_CST_BARRIER(order); \ +} + +MBED_FORCEINLINE void core_util_atomic_flag_clear(volatile core_util_atomic_flag *flagPtr) +{ + MBED_BARRIER(); + flagPtr->_flag = false; + MBED_BARRIER(); +} + +MBED_FORCEINLINE void core_util_atomic_flag_clear_explicit(volatile core_util_atomic_flag *flagPtr, mbed_memory_order order) +{ + MBED_CHECK_STORE_ORDER(order); + MBED_RELEASE_BARRIER(order); + flagPtr->_flag = false; + MBED_SEQ_CST_BARRIER(order); +} +DO_MBED_LOCKFREE_LOADSTORE(uint8_t, u8) +DO_MBED_LOCKFREE_LOADSTORE(uint16_t, u16) +DO_MBED_LOCKFREE_LOADSTORE(uint32_t, u32) +DO_MBED_LOCKFREE_LOADSTORE(int8_t, s8) +DO_MBED_LOCKFREE_LOADSTORE(int16_t, s16) +DO_MBED_LOCKFREE_LOADSTORE(int32_t, s32) +DO_MBED_LOCKFREE_LOADSTORE(bool, bool) +DO_MBED_LOCKFREE_LOADSTORE(void *, ptr) + + +/********************* GENERIC VARIANTS - SIGNED, BOOL, POINTERS ****************/ + +MBED_FORCEINLINE int64_t core_util_atomic_load_s64(const volatile int64_t *valuePtr) +{ + return (int64_t)core_util_atomic_load_u64((const volatile uint64_t *)valuePtr); +} + +MBED_FORCEINLINE void core_util_atomic_store_s64(volatile int64_t *valuePtr, int64_t desiredValue) +{ + core_util_atomic_store_u64((volatile uint64_t *)valuePtr, (uint64_t)desiredValue); +} + +#define DO_MBED_SIGNED_CAS_OP(name, T, fn_suffix) \ +MBED_FORCEINLINE bool core_util_atomic_##name##_s##fn_suffix(volatile T *ptr, \ + T *expectedCurrentValue, T desiredValue) \ +{ \ + return core_util_atomic_##name##_u##fn_suffix((volatile u##T *)ptr, \ + (u##T *)expectedCurrentValue, (u##T)desiredValue); \ +} \ + \ +MBED_FORCEINLINE bool core_util_atomic_##name##_explicit_s##fn_suffix(volatile T *ptr, \ + T *expectedCurrentValue, T desiredValue, \ + mbed_memory_order success, mbed_memory_order failure) \ +{ \ + return core_util_atomic_##name##_explicit_u##fn_suffix((volatile u##T *)ptr, \ + (u##T *)expectedCurrentValue, (u##T)desiredValue, success, failure); \ +} + +#define DO_MBED_SIGNED_CAS_OPS(name) \ + DO_MBED_SIGNED_CAS_OP(name, int8_t, 8) \ + DO_MBED_SIGNED_CAS_OP(name, int16_t, 16) \ + DO_MBED_SIGNED_CAS_OP(name, int32_t, 32) \ + DO_MBED_SIGNED_CAS_OP(name, int64_t, 64) + +DO_MBED_SIGNED_CAS_OPS(cas) +DO_MBED_SIGNED_CAS_OPS(compare_exchange_weak) + +MBED_FORCEINLINE bool core_util_atomic_cas_bool(volatile bool *ptr, bool *expectedCurrentValue, bool desiredValue) +{ + return core_util_atomic_cas_u8((volatile uint8_t *)ptr, (uint8_t *)expectedCurrentValue, desiredValue); +} + +MBED_FORCEINLINE bool core_util_atomic_cas_explicit_bool(volatile bool *ptr, bool *expectedCurrentValue, bool desiredValue, mbed_memory_order success, mbed_memory_order failure) +{ + return core_util_atomic_cas_explicit_u8((volatile uint8_t *)ptr, (uint8_t *)expectedCurrentValue, desiredValue, success, failure); +} + +inline bool core_util_atomic_cas_ptr(void *volatile *ptr, void **expectedCurrentValue, void *desiredValue) +{ +#if MBED_ATOMIC_PTR_SIZE == 32 + return core_util_atomic_cas_u32( + (volatile uint32_t *)ptr, + (uint32_t *)expectedCurrentValue, + (uint32_t)desiredValue); +#else + return core_util_atomic_cas_u64( + (volatile 
uint64_t *)ptr, + (uint64_t *)expectedCurrentValue, + (uint64_t)desiredValue); +#endif +} + +MBED_FORCEINLINE bool core_util_atomic_cas_explicit_ptr(void *volatile *ptr, void **expectedCurrentValue, void *desiredValue, mbed_memory_order success, mbed_memory_order failure) +{ +#if MBED_ATOMIC_PTR_SIZE == 32 + return core_util_atomic_cas_explicit_u32( + (volatile uint32_t *)ptr, + (uint32_t *)expectedCurrentValue, + (uint32_t)desiredValue, + success, failure); +#else + return core_util_atomic_cas_explicit_u64( + (volatile uint64_t *)ptr, + (uint64_t *)expectedCurrentValue, + (uint64_t)desiredValue, + success, failure); +#endif +} + +MBED_FORCEINLINE bool core_util_atomic_compare_exchange_weak_bool(volatile bool *ptr, bool *expectedCurrentValue, bool desiredValue) +{ + return core_util_atomic_compare_exchange_weak_u8((volatile uint8_t *)ptr, (uint8_t *)expectedCurrentValue, desiredValue); +} + +MBED_FORCEINLINE bool core_util_atomic_compare_exchange_weak_explicit_bool(volatile bool *ptr, bool *expectedCurrentValue, bool desiredValue, mbed_memory_order success, mbed_memory_order failure) +{ + return core_util_atomic_compare_exchange_weak_explicit_u8((volatile uint8_t *)ptr, (uint8_t *)expectedCurrentValue, desiredValue, success, failure); +} + +MBED_FORCEINLINE bool core_util_atomic_compare_exchange_weak_ptr(void *volatile *ptr, void **expectedCurrentValue, void *desiredValue) +{ +#if MBED_ATOMIC_PTR_SIZE == 32 + return core_util_atomic_compare_exchange_weak_u32( + (volatile uint32_t *)ptr, + (uint32_t *)expectedCurrentValue, + (uint32_t)desiredValue); +#else + return core_util_atomic_compare_exchange_weak_u64( + (volatile uint64_t *)ptr, + (uint64_t *)expectedCurrentValue, + (uint64_t)desiredValue); +#endif +} + +MBED_FORCEINLINE bool core_util_atomic_compare_exchange_weak_explicit_ptr(void *volatile *ptr, void **expectedCurrentValue, void *desiredValue, mbed_memory_order success, mbed_memory_order failure) +{ +#if MBED_ATOMIC_PTR_SIZE == 32 + return core_util_atomic_compare_exchange_weak_explicit_u32( + (volatile uint32_t *)ptr, + (uint32_t *)expectedCurrentValue, + (uint32_t)desiredValue, + success, failure); +#else + return core_util_atomic_compare_exchange_weak_explicit_u64( + (volatile uint64_t *)ptr, + (uint64_t *)expectedCurrentValue, + (uint64_t)desiredValue, + success, failure); +#endif +} + +#define DO_MBED_SIGNED_FETCH_OP(name, T, fn_suffix) \ +MBED_FORCEINLINE T core_util_atomic_##name##_s##fn_suffix(volatile T *valuePtr, T arg) \ +{ \ + return (T)core_util_atomic_##name##_u##fn_suffix((volatile u##T *)valuePtr, (u##T)arg); \ +} + +#define DO_MBED_SIGNED_EXPLICIT_FETCH_OP(name, T, fn_suffix) \ +MBED_FORCEINLINE T core_util_atomic_##name##_explicit_s##fn_suffix(volatile T *valuePtr, T arg, mbed_memory_order order) \ +{ \ + return (T)core_util_atomic_##name##_explicit_u##fn_suffix((volatile u##T *)valuePtr, (u##T)arg, order); \ +} + +#define DO_MBED_SIGNED_FETCH_OPS(name) \ + DO_MBED_SIGNED_FETCH_OP(name, int8_t, 8) \ + DO_MBED_SIGNED_FETCH_OP(name, int16_t, 16) \ + DO_MBED_SIGNED_FETCH_OP(name, int32_t, 32) \ + DO_MBED_SIGNED_FETCH_OP(name, int64_t, 64) + +#define DO_MBED_SIGNED_EXPLICIT_FETCH_OPS(name) \ + DO_MBED_SIGNED_EXPLICIT_FETCH_OP(name, int8_t, 8) \ + DO_MBED_SIGNED_EXPLICIT_FETCH_OP(name, int16_t, 16) \ + DO_MBED_SIGNED_EXPLICIT_FETCH_OP(name, int32_t, 32) \ + DO_MBED_SIGNED_EXPLICIT_FETCH_OP(name, int64_t, 64) + +DO_MBED_SIGNED_FETCH_OPS(exchange) +DO_MBED_SIGNED_FETCH_OPS(incr) +DO_MBED_SIGNED_FETCH_OPS(decr) +DO_MBED_SIGNED_FETCH_OPS(fetch_add) 
+DO_MBED_SIGNED_FETCH_OPS(fetch_sub) + +DO_MBED_SIGNED_EXPLICIT_FETCH_OPS(exchange) +DO_MBED_SIGNED_EXPLICIT_FETCH_OPS(fetch_add) +DO_MBED_SIGNED_EXPLICIT_FETCH_OPS(fetch_sub) + +MBED_FORCEINLINE bool core_util_atomic_exchange_bool(volatile bool *valuePtr, bool desiredValue) +{ + return (bool)core_util_atomic_exchange_u8((volatile uint8_t *)valuePtr, desiredValue); +} + +MBED_FORCEINLINE bool core_util_atomic_exchange_explicit_bool(volatile bool *valuePtr, bool desiredValue, mbed_memory_order order) +{ + return (bool)core_util_atomic_exchange_explicit_u8((volatile uint8_t *)valuePtr, desiredValue, order); +} + +inline void *core_util_atomic_exchange_ptr(void *volatile *valuePtr, void *desiredValue) +{ +#if MBED_ATOMIC_PTR_SIZE == 32 + return (void *)core_util_atomic_exchange_u32((volatile uint32_t *)valuePtr, (uint32_t)desiredValue); +#else + return (void *)core_util_atomic_exchange_u64((volatile uint64_t *)valuePtr, (uint64_t)desiredValue); +#endif +} + +MBED_FORCEINLINE void *core_util_atomic_exchange_explicit_ptr(void *volatile *valuePtr, void *desiredValue, mbed_memory_order order) +{ +#if MBED_ATOMIC_PTR_SIZE == 32 + return (void *)core_util_atomic_exchange_explicit_u32((volatile uint32_t *)valuePtr, (uint32_t)desiredValue, order); +#else + return (void *)core_util_atomic_exchange_explicit_u64((volatile uint64_t *)valuePtr, (uint64_t)desiredValue, order); +#endif +} + +inline void *core_util_atomic_incr_ptr(void *volatile *valuePtr, ptrdiff_t delta) +{ +#if MBED_ATOMIC_PTR_SIZE == 32 + return (void *)core_util_atomic_incr_u32((volatile uint32_t *)valuePtr, (uint32_t)delta); +#else + return (void *)core_util_atomic_incr_u64((volatile uint64_t *)valuePtr, (uint64_t)delta); +#endif +} + +inline void *core_util_atomic_decr_ptr(void *volatile *valuePtr, ptrdiff_t delta) +{ +#if MBED_ATOMIC_PTR_SIZE == 32 + return (void *)core_util_atomic_decr_u32((volatile uint32_t *)valuePtr, (uint32_t)delta); +#else + return (void *)core_util_atomic_decr_u64((volatile uint64_t *)valuePtr, (uint64_t)delta); +#endif +} + +MBED_FORCEINLINE void *core_util_atomic_fetch_add_ptr(void *volatile *valuePtr, ptrdiff_t arg) +{ +#if MBED_ATOMIC_PTR_SIZE == 32 + return (void *)core_util_atomic_fetch_add_u32((volatile uint32_t *)valuePtr, (uint32_t)arg); +#else + return (void *)core_util_atomic_fetch_add_u64((volatile uint64_t *)valuePtr, (uint64_t)arg); +#endif +} + +MBED_FORCEINLINE void *core_util_atomic_fetch_add_explicit_ptr(void *volatile *valuePtr, ptrdiff_t arg, mbed_memory_order order) +{ +#if MBED_ATOMIC_PTR_SIZE == 32 + return (void *)core_util_atomic_fetch_add_explicit_u32((volatile uint32_t *)valuePtr, (uint32_t)arg, order); +#else + return (void *)core_util_atomic_fetch_add_explicit_u64((volatile uint64_t *)valuePtr, (uint64_t)arg, order); +#endif +} + +MBED_FORCEINLINE void *core_util_atomic_fetch_sub_ptr(void *volatile *valuePtr, ptrdiff_t arg) +{ +#if MBED_ATOMIC_PTR_SIZE == 32 + return (void *)core_util_atomic_fetch_sub_u32((volatile uint32_t *)valuePtr, (uint32_t)arg); +#else + return (void *)core_util_atomic_fetch_sub_u64((volatile uint64_t *)valuePtr, (uint64_t)arg); +#endif +} + +MBED_FORCEINLINE void *core_util_atomic_fetch_sub_explicit_ptr(void *volatile *valuePtr, ptrdiff_t arg, mbed_memory_order order) +{ +#if MBED_ATOMIC_PTR_SIZE == 32 + return (void *)core_util_atomic_fetch_sub_explicit_u32((volatile uint32_t *)valuePtr, (uint32_t)arg, order); +#else + return (void *)core_util_atomic_fetch_sub_explicit_u64((volatile uint64_t *)valuePtr, (uint64_t)arg, order); +#endif +} + 
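/* As the pointer helpers above show, the C-level incr/decr/fetch_add/fetch_sub
 * pointer functions apply their ptrdiff_t argument as a raw byte offset to the
 * stored address (only the C++ template overloads further down scale by
 * sizeof(T)). An illustrative sketch, with a hypothetical byte buffer and
 * write pointer:
 *
 *     static uint8_t buffer[512];
 *     static uint8_t *volatile write_ptr = buffer;
 *
 *     // Atomically reserve len bytes; returns the start of the reserved region
 *     // (no wrap or overflow handling - illustration only).
 *     static uint8_t *claim_bytes(size_t len)
 *     {
 *         return (uint8_t *) core_util_atomic_fetch_add_ptr((void *volatile *) &write_ptr, (ptrdiff_t) len);
 *     }
 */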
+/***************** DUMMY EXPLICIT ORDERING FOR LOCKED OPS *****************/ + +/* Need to throw away the ordering information for all locked operations */ +MBED_FORCEINLINE uint64_t core_util_atomic_load_explicit_u64(const volatile uint64_t *valuePtr, MBED_UNUSED mbed_memory_order order) +{ + MBED_CHECK_LOAD_ORDER(order); + return core_util_atomic_load_u64(valuePtr); +} + +MBED_FORCEINLINE int64_t core_util_atomic_load_explicit_s64(const volatile int64_t *valuePtr, MBED_UNUSED mbed_memory_order order) +{ + MBED_CHECK_LOAD_ORDER(order); + return core_util_atomic_load_s64(valuePtr); +} + +MBED_FORCEINLINE void core_util_atomic_store_explicit_u64(volatile uint64_t *valuePtr, uint64_t desiredValue, MBED_UNUSED mbed_memory_order order) +{ + MBED_CHECK_STORE_ORDER(order); + core_util_atomic_store_u64(valuePtr, desiredValue); +} + +MBED_FORCEINLINE void core_util_atomic_store_explicit_s64(volatile int64_t *valuePtr, int64_t desiredValue, MBED_UNUSED mbed_memory_order order) +{ + MBED_CHECK_STORE_ORDER(order); + core_util_atomic_store_s64(valuePtr, desiredValue); +} + +#define DO_MBED_LOCKED_FETCH_OP_ORDERING(name, T, fn_suffix) \ +MBED_FORCEINLINE T core_util_atomic_##name##_explicit_##fn_suffix( \ + volatile T *valuePtr, T arg, MBED_UNUSED mbed_memory_order order) \ +{ \ + return core_util_atomic_##name##_##fn_suffix(valuePtr, arg); \ +} + +#define DO_MBED_LOCKED_CAS_ORDERING(name, T, fn_suffix) \ +MBED_FORCEINLINE bool core_util_atomic_##name##_explicit_##fn_suffix( \ + volatile T *ptr, T *expectedCurrentValue, T desiredValue, \ + MBED_UNUSED mbed_memory_order success, \ + MBED_UNUSED mbed_memory_order failure) \ +{ \ + MBED_CHECK_CAS_ORDER(success, failure); \ + return core_util_atomic_##name##_##fn_suffix(ptr, expectedCurrentValue, desiredValue); \ +} + +DO_MBED_LOCKED_FETCH_OP_ORDERINGS(exchange) +DO_MBED_LOCKED_FETCH_OP_ORDERINGS(fetch_add) +DO_MBED_LOCKED_FETCH_OP_ORDERINGS(fetch_sub) +DO_MBED_LOCKED_FETCH_OP_ORDERINGS(fetch_and) +DO_MBED_LOCKED_FETCH_OP_ORDERINGS(fetch_or) +DO_MBED_LOCKED_FETCH_OP_ORDERINGS(fetch_xor) +DO_MBED_LOCKED_CAS_ORDERINGS(cas) +DO_MBED_LOCKED_CAS_ORDERINGS(compare_exchange_weak) + +#ifdef __cplusplus +} // extern "C" + +/***************** TEMPLATE IMPLEMENTATIONS *****************/ + +/* Each of these groups provides specialisations for the T template for each of + * the small types (there is no base implementation), and the base implementation + * of the T * template. 
+ */ +#define DO_MBED_ATOMIC_LOAD_TEMPLATE(T, fn_suffix) \ +template<> \ +inline T core_util_atomic_load(const volatile T *valuePtr) \ +{ \ + return core_util_atomic_load_##fn_suffix(valuePtr); \ +} \ + \ +template<> \ +inline T core_util_atomic_load_explicit(const volatile T *valuePtr, mbed_memory_order order) \ +{ \ + return core_util_atomic_load_explicit_##fn_suffix(valuePtr, order); \ +} + +template +inline T *core_util_atomic_load(T *const volatile *valuePtr) +{ + return (T *) core_util_atomic_load_ptr((void *const volatile *) valuePtr); +} + +template +inline T *core_util_atomic_load_explicit(T *const volatile *valuePtr, mbed_memory_order order) +{ + return (T *) core_util_atomic_load_explicit_ptr((void *const volatile *) valuePtr, order); +} + +DO_MBED_ATOMIC_LOAD_TEMPLATE(uint8_t, u8) +DO_MBED_ATOMIC_LOAD_TEMPLATE(uint16_t, u16) +DO_MBED_ATOMIC_LOAD_TEMPLATE(uint32_t, u32) +DO_MBED_ATOMIC_LOAD_TEMPLATE(uint64_t, u64) +DO_MBED_ATOMIC_LOAD_TEMPLATE(int8_t, s8) +DO_MBED_ATOMIC_LOAD_TEMPLATE(int16_t, s16) +DO_MBED_ATOMIC_LOAD_TEMPLATE(int32_t, s32) +DO_MBED_ATOMIC_LOAD_TEMPLATE(int64_t, s64) +DO_MBED_ATOMIC_LOAD_TEMPLATE(bool, bool) + +#define DO_MBED_ATOMIC_STORE_TEMPLATE(T, fn_suffix) \ +template<> \ +inline void core_util_atomic_store(volatile T *valuePtr, T val) \ +{ \ + core_util_atomic_store_##fn_suffix(valuePtr, val); \ +} \ + \ +template<> \ +inline void core_util_atomic_store_explicit(volatile T *valuePtr, T val, mbed_memory_order order) \ +{ \ + core_util_atomic_store_explicit_##fn_suffix(valuePtr, val, order); \ +} + +template +inline void core_util_atomic_store(T *volatile *valuePtr, T *val) +{ + core_util_atomic_store_ptr((void *volatile *) valuePtr, val); +} + +template +inline void core_util_atomic_store_explicit(T *volatile *valuePtr, T *val, mbed_memory_order order) +{ + core_util_atomic_store_ptr((void *volatile *) valuePtr, val, order); +} + +DO_MBED_ATOMIC_STORE_TEMPLATE(uint8_t, u8) +DO_MBED_ATOMIC_STORE_TEMPLATE(uint16_t, u16) +DO_MBED_ATOMIC_STORE_TEMPLATE(uint32_t, u32) +DO_MBED_ATOMIC_STORE_TEMPLATE(uint64_t, u64) +DO_MBED_ATOMIC_STORE_TEMPLATE(int8_t, s8) +DO_MBED_ATOMIC_STORE_TEMPLATE(int16_t, s16) +DO_MBED_ATOMIC_STORE_TEMPLATE(int32_t, s32) +DO_MBED_ATOMIC_STORE_TEMPLATE(int64_t, s64) +DO_MBED_ATOMIC_STORE_TEMPLATE(bool, bool) + +#define DO_MBED_ATOMIC_CAS_TEMPLATE(tname, fname, T, fn_suffix) \ +template<> inline \ +bool core_util_atomic_##tname(volatile T *ptr, T *expectedCurrentValue, T desiredValue) \ +{ \ + return core_util_atomic_##fname##_##fn_suffix(ptr, expectedCurrentValue, desiredValue); \ +} + +template +inline bool core_util_atomic_compare_exchange_strong(T *volatile *ptr, T **expectedCurrentValue, T *desiredValue) +{ + return core_util_atomic_cas_ptr((void *volatile *) ptr, (void **) expectedCurrentValue, desiredValue); +} + +template +inline bool core_util_atomic_compare_exchange_weak(T *volatile *ptr, T **expectedCurrentValue, T *desiredValue) +{ + return core_util_atomic_compare_exchange_weak_ptr((void *volatile *) ptr, (void **) expectedCurrentValue, desiredValue); +} + +#define DO_MBED_ATOMIC_CAS_TEMPLATES(tname, fname) \ + DO_MBED_ATOMIC_CAS_TEMPLATE(tname, fname, uint8_t, u8) \ + DO_MBED_ATOMIC_CAS_TEMPLATE(tname, fname, uint16_t, u16) \ + DO_MBED_ATOMIC_CAS_TEMPLATE(tname, fname, uint32_t, u32) \ + DO_MBED_ATOMIC_CAS_TEMPLATE(tname, fname, uint64_t, u64) \ + DO_MBED_ATOMIC_CAS_TEMPLATE(tname, fname, int8_t, s8) \ + DO_MBED_ATOMIC_CAS_TEMPLATE(tname, fname, int16_t, s16) \ + DO_MBED_ATOMIC_CAS_TEMPLATE(tname, fname, int32_t, s32) \ + 
DO_MBED_ATOMIC_CAS_TEMPLATE(tname, fname, int64_t, s64) \
+ DO_MBED_ATOMIC_CAS_TEMPLATE(tname, fname, bool, bool)
+
+DO_MBED_ATOMIC_CAS_TEMPLATES(compare_exchange_strong, cas)
+DO_MBED_ATOMIC_CAS_TEMPLATES(compare_exchange_weak, compare_exchange_weak)
+
+#define DO_MBED_ATOMIC_OP_TEMPLATE(name, T, fn_suffix) \
+template<> \
+inline T core_util_atomic_##name(volatile T *valuePtr, T arg) \
+{ \
+ return core_util_atomic_##name##_##fn_suffix(valuePtr, arg); \
+} \
+ \
+template<> \
+inline T core_util_atomic_##name##_explicit(volatile T *valuePtr, T arg, \
+ mbed_memory_order order) \
+{ \
+ return core_util_atomic_##name##_explicit_##fn_suffix(valuePtr, arg, order); \
+}
+
+
+template<>
+inline bool core_util_atomic_exchange(volatile bool *valuePtr, bool arg)
+{
+ return core_util_atomic_exchange_bool(valuePtr, arg);
+}
+
+template<>
+inline bool core_util_atomic_exchange_explicit(volatile bool *valuePtr, bool arg, mbed_memory_order order)
+{
+ return core_util_atomic_exchange_explicit_bool(valuePtr, arg, order);
+}
+
+template
+inline T *core_util_atomic_exchange(T *volatile *valuePtr, T *arg)
+{
+ return (T *) core_util_atomic_exchange_ptr((void *volatile *) valuePtr, arg);
+}
+
+template
+inline T *core_util_atomic_exchange_explicit(T *volatile *valuePtr, T *arg, mbed_memory_order order)
+{
+ return (T *) core_util_atomic_exchange_explicit_ptr((void *volatile *) valuePtr, arg, order);
+}
+
+template
+inline T *core_util_atomic_fetch_add(T *volatile *valuePtr, ptrdiff_t arg)
+{
+ return (T *) core_util_atomic_fetch_add_ptr((void *volatile *) valuePtr, arg * sizeof(T));
+}
+
+template
+inline T *core_util_atomic_fetch_add_explicit(T *volatile *valuePtr, ptrdiff_t arg, mbed_memory_order order)
+{
+ return (T *) core_util_atomic_fetch_add_explicit_ptr((void *volatile *) valuePtr, arg * sizeof(T), order);
+}
+
+template
+inline T *core_util_atomic_fetch_sub(T *volatile *valuePtr, ptrdiff_t arg)
+{
+ return (T *) core_util_atomic_fetch_sub_ptr((void *volatile *) valuePtr, arg * sizeof(T));
+}
+
+template
+inline T *core_util_atomic_fetch_sub_explicit(T *volatile *valuePtr, ptrdiff_t arg, mbed_memory_order order)
+{
+ return (T *) core_util_atomic_fetch_sub_explicit_ptr((void *volatile *) valuePtr, arg * sizeof(T), order);
+}
+
+
+#define DO_MBED_ATOMIC_OP_U_TEMPLATES(name) \
+ DO_MBED_ATOMIC_OP_TEMPLATE(name, uint8_t, u8) \
+ DO_MBED_ATOMIC_OP_TEMPLATE(name, uint16_t, u16) \
+ DO_MBED_ATOMIC_OP_TEMPLATE(name, uint32_t, u32) \
+ DO_MBED_ATOMIC_OP_TEMPLATE(name, uint64_t, u64)
+
+#define DO_MBED_ATOMIC_OP_S_TEMPLATES(name) \
+ DO_MBED_ATOMIC_OP_TEMPLATE(name, int8_t, s8) \
+ DO_MBED_ATOMIC_OP_TEMPLATE(name, int16_t, s16) \
+ DO_MBED_ATOMIC_OP_TEMPLATE(name, int32_t, s32) \
+ DO_MBED_ATOMIC_OP_TEMPLATE(name, int64_t, s64)
+
+DO_MBED_ATOMIC_OP_U_TEMPLATES(exchange)
+DO_MBED_ATOMIC_OP_S_TEMPLATES(exchange)
+DO_MBED_ATOMIC_OP_U_TEMPLATES(fetch_add)
+DO_MBED_ATOMIC_OP_S_TEMPLATES(fetch_add)
+DO_MBED_ATOMIC_OP_U_TEMPLATES(fetch_sub)
+DO_MBED_ATOMIC_OP_S_TEMPLATES(fetch_sub)
+DO_MBED_ATOMIC_OP_U_TEMPLATES(fetch_and)
+DO_MBED_ATOMIC_OP_U_TEMPLATES(fetch_or)
+DO_MBED_ATOMIC_OP_U_TEMPLATES(fetch_xor)
+
+#endif // __cplusplus
+
+#undef MBED_DOP_REG
+#undef MBED_CMP_IMM
+#undef MBED_SUB3_IMM
+#undef DO_MBED_LOCKFREE_EXCHG_ASM
+#undef DO_MBED_LOCKFREE_3OP_ASM
+#undef DO_MBED_LOCKFREE_2OP_ASM
+#undef DO_MBED_LOCKFREE_CAS_WEAK_ASM
+#undef DO_MBED_LOCKFREE_CAS_STRONG_ASM
+#undef DO_MBED_LOCKFREE_LOADSTORE
+#undef DO_MBED_LOCKFREE_EXCHG_OP
+#undef DO_MBED_LOCKFREE_CAS_WEAK_OP
+#undef
DO_MBED_LOCKFREE_CAS_STRONG_OP +#undef DO_MBED_LOCKFREE_2OP +#undef DO_MBED_LOCKFREE_3OP +#undef DO_MBED_LOCKFREE_EXCHG_OPS +#undef DO_MBED_LOCKFREE_2OPS +#undef DO_MBED_LOCKFREE_3OPS +#undef DO_MBED_LOCKFREE_CAS_WEAK_OPS +#undef DO_MBED_LOCKFREE_CAS_STRONG_OPS +#undef DO_MBED_SIGNED_CAS_OP +#undef DO_MBED_SIGNED_CAS_OPS +#undef DO_MBED_SIGNED_FETCH_OP +#undef DO_MBED_SIGNED_EXPLICIT_FETCH_OP +#undef DO_MBED_SIGNED_FETCH_OPS +#undef DO_MBED_SIGNED_EXPLICIT_FETCH_OPS +#undef DO_MBED_LOCKED_FETCH_OP_ORDERINGS +#undef DO_MBED_LOCKED_CAS_ORDERINGS +#undef MBED_ACQUIRE_BARRIER +#undef MBED_RELEASE_BARRIER +#undef MBED_SEQ_CST_BARRIER +#undef DO_MBED_ATOMIC_LOAD_TEMPLATE +#undef DO_MBED_ATOMIC_STORE_TEMPLATE +#undef DO_MBED_ATOMIC_EXCHANGE_TEMPLATE +#undef DO_MBED_ATOMIC_CAS_TEMPLATE +#undef DO_MBED_ATOMIC_CAS_TEMPLATES +#undef DO_MBED_ATOMIC_FETCH_TEMPLATE +#undef DO_MBED_ATOMIC_FETCH_U_TEMPLATES +#undef DO_MBED_ATOMIC_FETCH_S_TEMPLATES + +#endif diff --git a/platform/mbed_atomic.h b/platform/mbed_atomic.h new file mode 100644 index 00000000000..4852c9bef26 --- /dev/null +++ b/platform/mbed_atomic.h @@ -0,0 +1,981 @@ + +/* + * Copyright (c) 2015-2016, ARM Limited, All Rights Reserved + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef __MBED_UTIL_ATOMIC_H__ +#define __MBED_UTIL_ATOMIC_H__ + +#include "cmsis.h" + +#include +#include +#include +#include "mbed_toolchain.h" + +/** \addtogroup platform */ +/** @{*/ + +/** + * \defgroup platform_atomic atomic functions + * + * Atomic functions function analogously to C11 and C++11 - loads have + * acquire semantics, stores have release semantics, and atomic operations + * are sequentially consistent. Atomicity is enforced both between threads and + * interrupt handlers. + * + * @{ + */ + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * Memory order constraints for atomic operations. Intended semantics + * are as per C++11. 
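 *
 * For example, a hypothetical event counter that only needs atomicity, with no
 * ordering of surrounding accesses, could use a relaxed explicit operation:
 * ~~~
 * static volatile uint32_t dropped_packets;
 *
 * void count_drop(void)
 * {
 *     core_util_atomic_fetch_add_explicit_u32(&dropped_packets, 1, mbed_memory_order_relaxed);
 * }
 * ~~~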
+ */ +typedef enum mbed_memory_order { + /* Bits 0 = consume + * 1 = acquire (explicitly requested, or implied by seq.cst) + * 2 = release (explicitly requested, or implied by seq.cst) + * 4 = sequentially consistent + */ + mbed_memory_order_relaxed = 0x00, + mbed_memory_order_consume = 0x01, + mbed_memory_order_acquire = 0x02, + mbed_memory_order_release = 0x04, + mbed_memory_order_acq_rel = 0x06, + mbed_memory_order_seq_cst = 0x16 +} mbed_memory_order; + +// if __EXCLUSIVE_ACCESS rtx macro not defined, we need to get this via own-set architecture macros +#ifndef MBED_EXCLUSIVE_ACCESS +#ifndef __EXCLUSIVE_ACCESS +#if defined __arm__ || defined __ICC_ARM__ || defined __ARM_ARCH +#if ((__ARM_ARCH_7M__ == 1U) || \ + (__ARM_ARCH_7EM__ == 1U) || \ + (__ARM_ARCH_8M_BASE__ == 1U) || \ + (__ARM_ARCH_8M_MAIN__ == 1U)) || \ + (__ARM_ARCH_7A__ == 1U) +#define MBED_EXCLUSIVE_ACCESS 1U +#define MBED_EXCLUSIVE_ACCESS_THUMB1 (__ARM_ARCH_8M_BASE__ == 1U) +#ifdef __ICCARM__ +#if __CPU_MODE__ == 2 +#define MBED_EXCLUSIVE_ACCESS_ARM 1U +#else +#define MBED_EXCLUSIVE_ACCESS_ARM 0U +#endif +#else +#if !defined (__thumb__) +#define MBED_EXCLUSIVE_ACCESS_ARM 1U +#else +#define MBED_EXCLUSIVE_ACCESS_ARM 0U +#endif +#endif +#elif (__ARM_ARCH_6M__ == 1U) +#define MBED_EXCLUSIVE_ACCESS 0U +#else +#error "Unknown ARM architecture for exclusive access" +#endif // __ARM_ARCH_xxx +#else // __arm__ || defined __ICC_ARM__ || defined __ARM_ARCH +// Seem to be compiling for non-ARM, so stick with critical section implementations +#define MBED_EXCLUSIVE_ACCESS 0U +#endif +#else +#define MBED_EXCLUSIVE_ACCESS __EXCLUSIVE_ACCESS +#endif +#endif + +#if MBED_EXCLUSIVE_ACCESS +#define MBED_INLINE_IF_EX inline +#else +#define MBED_INLINE_IF_EX +#endif + +/** + * A lock-free, primitive atomic flag. + * + * Emulate C11's atomic_flag. The flag is initially in an indeterminate state + * unless explicitly initialized with CORE_UTIL_ATOMIC_FLAG_INIT. + */ +typedef struct core_util_atomic_flag { + uint8_t _flag; +} core_util_atomic_flag; + +/** + * Initializer for a core_util_atomic_flag. + * + * Example: + * ~~~ + * core_util_atomic_flag in_progress = CORE_UTIL_ATOMIC_FLAG_INIT; + * ~~~ + */ +#define CORE_UTIL_ATOMIC_FLAG_INIT { 0 } + +/** + * Atomic test and set. + * + * Atomically tests then sets the flag to true, returning the previous value. + * + * @param flagPtr Target flag being tested and set. + * @return The previous value. + */ +MBED_INLINE_IF_EX bool core_util_atomic_flag_test_and_set(volatile core_util_atomic_flag *flagPtr); + +/** \ copydoc core_util_atomic_flag_test_and_set + * @param order memory ordering constraint + */ +MBED_FORCEINLINE bool core_util_atomic_flag_test_and_set_explicit(volatile core_util_atomic_flag *valuePtr, mbed_memory_order order); + +/** + * Atomic clear. + * + * @param flagPtr Target flag being cleared. + */ +MBED_FORCEINLINE void core_util_atomic_flag_clear(volatile core_util_atomic_flag *flagPtr); + +/** \ copydoc core_util_atomic_flag_clear + * @param order memory ordering constraint + */ +MBED_FORCEINLINE void core_util_atomic_flag_clear_explicit(volatile core_util_atomic_flag *flagPtr, mbed_memory_order order); + + +/** + * Atomic compare and set. It compares the contents of a memory location to a + * given value and, only if they are the same, modifies the contents of that + * memory location to a given new value. This is done as a single atomic + * operation. 
The atomicity guarantees that the new value is calculated based on
+ * up-to-date information; if the value had been updated by another thread in
+ * the meantime, the write would fail due to a mismatched expectedCurrentValue.
+ *
+ * Refer to https://en.wikipedia.org/wiki/Compare-and-set [which may redirect
+ * you to the article on compare-and-swap].
+ *
+ * @param ptr The target memory location.
+ * @param[in,out] expectedCurrentValue A pointer to some location holding the
+ * expected current value of the data being set atomically.
+ * The computed 'desiredValue' should be a function of this current value.
+ * @note: This is an in-out parameter. In the
+ * failure case of atomic_cas (where the
+ * destination isn't set), the pointee of expectedCurrentValue is
+ * updated with the current value.
+ * @param[in] desiredValue The new value computed based on '*expectedCurrentValue'.
+ *
+ * @return true if the memory location was atomically
+ * updated with the desired value (after verifying
+ * that it contained the expectedCurrentValue),
+ * false otherwise. In the failure case,
+ * expectedCurrentValue is updated with the new
+ * value of the target memory location.
+ *
+ * pseudocode:
+ * function cas(p : pointer to int, old : pointer to int, new : int) returns bool {
+ * if *p != *old {
+ * *old = *p
+ * return false
+ * }
+ * *p = new
+ * return true
+ * }
+ *
+ * @note: In the failure case (where the destination isn't set), the value
+ * pointed to by expectedCurrentValue is instead updated with the current value.
+ * This property helps write concise code for the following incr:
+ *
+ * function incr(p : pointer to int, a : int) returns int {
+ * done = false
+ * value = atomic_load(p)
+ * while not done {
+ * done = atomic_cas(p, &value, value + a) // *value gets updated automatically until success
+ * }
+ * return value + a
+ * }
+ *
+ * However, if the call is made in a loop like this, the atomic_compare_exchange_weak
+ * functions are to be preferred.
+ *
+ * @note: This corresponds to the C11 "atomic_compare_exchange_strong" - it
+ * always succeeds if the current value is expected, as per the pseudocode
+ * above; it will not spuriously fail as "atomic_compare_exchange_weak" may.
+ * This call would normally be used when a fail return does not retry.
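 *
 * For illustration, a one-shot claim that does not retry on failure might look
 * like this (the state variable and its values are hypothetical):
 * ~~~
 * static volatile uint8_t state; // 0 = idle, 1 = busy
 *
 * bool try_claim(void)
 * {
 *     uint8_t expected = 0;
 *     return core_util_atomic_cas_u8(&state, &expected, 1);
 * }
 * ~~~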
+ */ +MBED_INLINE_IF_EX bool core_util_atomic_cas_u8(volatile uint8_t *ptr, uint8_t *expectedCurrentValue, uint8_t desiredValue); + +/** \copydoc core_util_atomic_cas_u8 + * @param success memory ordering constraint for successful exchange + * @param failure memory ordering constraint for failure + */ +MBED_FORCEINLINE bool core_util_atomic_cas_explicit_u8(volatile uint8_t *ptr, uint8_t *expectedCurrentValue, uint8_t desiredValue, mbed_memory_order success, mbed_memory_order failure); + +/** \copydoc core_util_atomic_cas_u8 */ +MBED_INLINE_IF_EX bool core_util_atomic_cas_u16(volatile uint16_t *ptr, uint16_t *expectedCurrentValue, uint16_t desiredValue); + +/** \copydoc core_util_atomic_cas_explicit_u8 */ +MBED_FORCEINLINE bool core_util_atomic_cas_explicit_u16(volatile uint16_t *ptr, uint16_t *expectedCurrentValue, uint16_t desiredValue, mbed_memory_order success, mbed_memory_order failure); + +/** \copydoc core_util_atomic_cas_u8 */ +MBED_INLINE_IF_EX bool core_util_atomic_cas_u32(volatile uint32_t *ptr, uint32_t *expectedCurrentValue, uint32_t desiredValue); + +/** \copydoc core_util_atomic_cas_explicit_u8 */ +MBED_FORCEINLINE bool core_util_atomic_cas_explicit_u32(volatile uint32_t *ptr, uint32_t *expectedCurrentValue, uint32_t desiredValue, mbed_memory_order success, mbed_memory_order failure); + +/** \copydoc core_util_atomic_cas_u8 */ +bool core_util_atomic_cas_u64(volatile uint64_t *ptr, uint64_t *expectedCurrentValue, uint64_t desiredValue); + +/** \copydoc core_util_atomic_cas_explicit_u8 */ +MBED_FORCEINLINE bool core_util_atomic_cas_explicit_u64(volatile uint64_t *ptr, uint64_t *expectedCurrentValue, uint64_t desiredValue, mbed_memory_order success, mbed_memory_order failure); + +/** \copydoc core_util_atomic_cas_u8 */ +MBED_FORCEINLINE bool core_util_atomic_cas_s8(volatile int8_t *ptr, int8_t *expectedCurrentValue, int8_t desiredValue); + +/** \copydoc core_util_atomic_cas_explicit_u8 */ +MBED_FORCEINLINE bool core_util_atomic_cas_explicit_s8(volatile int8_t *ptr, int8_t *expectedCurrentValue, int8_t desiredValue, mbed_memory_order success, mbed_memory_order failure); + +/** \copydoc core_util_atomic_cas_u8 */ +MBED_FORCEINLINE bool core_util_atomic_cas_s16(volatile int16_t *ptr, int16_t *expectedCurrentValue, int16_t desiredValue); + +/** \copydoc core_util_atomic_cas_explicit_u8 */ +MBED_FORCEINLINE bool core_util_atomic_cas_explicit_s16(volatile int16_t *ptr, int16_t *expectedCurrentValue, int16_t desiredValue, mbed_memory_order success, mbed_memory_order failure); + +/** \copydoc core_util_atomic_cas_u8 */ +MBED_FORCEINLINE bool core_util_atomic_cas_s32(volatile int32_t *ptr, int32_t *expectedCurrentValue, int32_t desiredValue); + +/** \copydoc core_util_atomic_cas_explicit_u8 */ +MBED_FORCEINLINE bool core_util_atomic_cas_explicit_s32(volatile int32_t *ptr, int32_t *expectedCurrentValue, int32_t desiredValue, mbed_memory_order success, mbed_memory_order failure); + +/** \copydoc core_util_atomic_cas_u8 */ +MBED_FORCEINLINE bool core_util_atomic_cas_s64(volatile int64_t *ptr, int64_t *expectedCurrentValue, int64_t desiredValue); + +/** \copydoc core_util_atomic_cas_explicit_u8 */ +MBED_FORCEINLINE bool core_util_atomic_cas_explicit_s64(volatile int64_t *ptr, int64_t *expectedCurrentValue, int64_t desiredValue, mbed_memory_order success, mbed_memory_order failure); + +/** \copydoc core_util_atomic_cas_u8 */ +MBED_FORCEINLINE bool core_util_atomic_cas_bool(volatile bool *ptr, bool *expectedCurrentValue, bool desiredValue); + +/** \copydoc core_util_atomic_cas_explicit_u8 */ 
+MBED_FORCEINLINE bool core_util_atomic_cas_explicit_bool(volatile bool *ptr, bool *expectedCurrentValue, bool desiredValue, mbed_memory_order success, mbed_memory_order failure);
+
+/** \copydoc core_util_atomic_cas_u8 */
+inline bool core_util_atomic_cas_ptr(void *volatile *ptr, void **expectedCurrentValue, void *desiredValue);
+
+/** \copydoc core_util_atomic_cas_explicit_u8 */
+MBED_FORCEINLINE bool core_util_atomic_cas_explicit_ptr(void *volatile *ptr, void **expectedCurrentValue, void *desiredValue, mbed_memory_order success, mbed_memory_order failure);
+
+
+
+/**
+ * Atomic compare and set. It compares the contents of a memory location to a
+ * given value and, only if they are the same, modifies the contents of that
+ * memory location to a given new value. This is done as a single atomic
+ * operation. The atomicity guarantees that the new value is calculated based on
+ * up-to-date information; if the value had been updated by another thread in
+ * the meantime, the write would fail due to a mismatched expectedCurrentValue.
+ *
+ * Refer to https://en.wikipedia.org/wiki/Compare-and-set [which may redirect
+ * you to the article on compare-and-swap].
+ *
+ * @param ptr The target memory location.
+ * @param[in,out] expectedCurrentValue A pointer to some location holding the
+ * expected current value of the data being set atomically.
+ * The computed 'desiredValue' should be a function of this current value.
+ * @note: This is an in-out parameter. In the
+ * failure case of atomic_cas (where the
+ * destination isn't set), the pointee of expectedCurrentValue is
+ * updated with the current value.
+ * @param[in] desiredValue The new value computed based on '*expectedCurrentValue'.
+ *
+ * @return true if the memory location was atomically
+ * updated with the desired value (after verifying
+ * that it contained the expectedCurrentValue),
+ * false otherwise. In the failure case,
+ * expectedCurrentValue is updated with the new
+ * value of the target memory location.
+ *
+ * pseudocode:
+ * function cas(p : pointer to int, old : pointer to int, new : int) returns bool {
+ * if *p != *old or spurious failure {
+ * *old = *p
+ * return false
+ * }
+ * *p = new
+ * return true
+ * }
+ *
+ * @note: In the failure case (where the destination isn't set), the value
+ * pointed to by expectedCurrentValue is instead updated with the current value.
+ * This property helps write concise code for the following incr:
+ *
+ * function incr(p : pointer to int, a : int) returns int {
+ * done = false
+ * value = *p // This fetch operation need not be atomic.
+ * while not done {
+ * done = atomic_compare_exchange_weak(p, &value, value + a) // *value gets updated automatically until success
+ * }
+ * return value + a
+ * }
+ *
+ * @note: This corresponds to the C11 "atomic_compare_exchange_weak" - unlike
+ * "atomic_compare_exchange_strong", it may spuriously fail even if the current
+ * value is as expected, as per the pseudocode above.
+ * This call would normally be used when a fail return will cause a retry anyway,
+ * saving the need for an extra loop inside the cas operation.
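 *
 * The incr pseudocode above might translate to the following C, using the u32
 * variants (shown for illustration - core_util_atomic_fetch_add_u32 already
 * provides this operation directly):
 * ~~~
 * uint32_t add_to_total(volatile uint32_t *total, uint32_t arg)
 * {
 *     uint32_t value = core_util_atomic_load_u32(total);
 *     // On failure, spurious or not, value is refreshed and the loop retries.
 *     while (!core_util_atomic_compare_exchange_weak_u32(total, &value, value + arg)) {
 *     }
 *     return value + arg;
 * }
 * ~~~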
+ */ +MBED_INLINE_IF_EX bool core_util_atomic_compare_exchange_weak_u8(volatile uint8_t *ptr, uint8_t *expectedCurrentValue, uint8_t desiredValue); + +/** \copydoc core_util_atomic_compare_exchange_weak_u8 + * @param success memory ordering constraint for successful exchange + * @param failure memory ordering constraint for failure + */ +MBED_FORCEINLINE bool core_util_atomic_compare_exchange_weak_explicit_u8(volatile uint8_t *ptr, uint8_t *expectedCurrentValue, uint8_t desiredValue, mbed_memory_order success, mbed_memory_order failure); + +/** \copydoc core_util_atomic_compare_exchange_weak_u8 */ +MBED_INLINE_IF_EX bool core_util_atomic_compare_exchange_weak_u16(volatile uint16_t *ptr, uint16_t *expectedCurrentValue, uint16_t desiredValue); + +/** \copydoc core_util_atomic_compare_exchange_weak_explicit_u8 */ +MBED_FORCEINLINE bool core_util_atomic_compare_exchange_weak_explicit_u16(volatile uint16_t *ptr, uint16_t *expectedCurrentValue, uint16_t desiredValue, mbed_memory_order success, mbed_memory_order failure); + +/** \copydoc core_util_atomic_compare_exchange_weak_u8 */ +MBED_INLINE_IF_EX bool core_util_atomic_compare_exchange_weak_u32(volatile uint32_t *ptr, uint32_t *expectedCurrentValue, uint32_t desiredValue); + +/** \copydoc core_util_atomic_compare_exchange_weak_explicit_u8 */ +MBED_FORCEINLINE bool core_util_atomic_compare_exchange_weak_explicit_u32(volatile uint32_t *ptr, uint32_t *expectedCurrentValue, uint32_t desiredValue, mbed_memory_order success, mbed_memory_order failure); + +/** \copydoc core_util_atomic_compare_exchange_weak_u8 */ +bool core_util_atomic_compare_exchange_weak_u64(volatile uint64_t *ptr, uint64_t *expectedCurrentValue, uint64_t desiredValue); + +/** \copydoc core_util_atomic_compare_exchange_weak_explicit_u8 */ +MBED_FORCEINLINE bool core_util_atomic_compare_exchange_weak_explicit_u64(volatile uint64_t *ptr, uint64_t *expectedCurrentValue, uint64_t desiredValue, mbed_memory_order success, mbed_memory_order failure); + +/** \copydoc core_util_atomic_compare_exchange_weak_u8 */ +MBED_FORCEINLINE bool core_util_atomic_compare_exchange_weak_s8(volatile int8_t *ptr, int8_t *expectedCurrentValue, int8_t desiredValue); + +/** \copydoc core_util_atomic_compare_exchange_weak_explicit_u8 */ +MBED_FORCEINLINE bool core_util_atomic_compare_exchange_weak_explicit_s8(volatile int8_t *ptr, int8_t *expectedCurrentValue, int8_t desiredValue, mbed_memory_order success, mbed_memory_order failure); + +/** \copydoc core_util_atomic_compare_exchange_weak_u8 */ +MBED_FORCEINLINE bool core_util_atomic_compare_exchange_weak_s16(volatile int16_t *ptr, int16_t *expectedCurrentValue, int16_t desiredValue); + +/** \copydoc core_util_atomic_compare_exchange_weak_explicit_u8 */ +MBED_FORCEINLINE bool core_util_atomic_compare_exchange_weak_explicit_s16(volatile int16_t *ptr, int16_t *expectedCurrentValue, int16_t desiredValue, mbed_memory_order success, mbed_memory_order failure); + +/** \copydoc core_util_atomic_compare_exchange_weak_u8 */ +MBED_FORCEINLINE bool core_util_atomic_compare_exchange_weak_s32(volatile int32_t *ptr, int32_t *expectedCurrentValue, int32_t desiredValue); + +/** \copydoc core_util_atomic_compare_exchange_weak_explicit_u8 */ +MBED_FORCEINLINE bool core_util_atomic_compare_exchange_weak_explicit_s32(volatile int32_t *ptr, int32_t *expectedCurrentValue, int32_t desiredValue, mbed_memory_order success, mbed_memory_order failure); + +/** \copydoc core_util_atomic_compare_exchange_weak_u8 */ +MBED_FORCEINLINE bool core_util_atomic_compare_exchange_weak_s64(volatile 
int64_t *ptr, int64_t *expectedCurrentValue, int64_t desiredValue); + +/** \copydoc core_util_atomic_compare_exchange_weak_explicit_u8 */ +MBED_FORCEINLINE bool core_util_atomic_compare_exchange_weak_explicit_s64(volatile int64_t *ptr, int64_t *expectedCurrentValue, int64_t desiredValue, mbed_memory_order success, mbed_memory_order failure); + +/** \copydoc core_util_atomic_compare_exchange_weak_u8 */ +MBED_FORCEINLINE bool core_util_atomic_compare_exchange_weak_bool(volatile bool *ptr, bool *expectedCurrentValue, bool desiredValue); + +/** \copydoc core_util_atomic_compare_exchange_weak_explicit_u8 */ +MBED_FORCEINLINE bool core_util_atomic_compare_exchange_weak_explicit_bool(volatile bool *ptr, bool *expectedCurrentValue, bool desiredValue, mbed_memory_order success, mbed_memory_order failure); + +/** \copydoc core_util_atomic_compare_exchange_weak_u8 */ +MBED_FORCEINLINE bool core_util_atomic_compare_exchange_weak_ptr(void *volatile *ptr, void **expectedCurrentValue, void *desiredValue); + +/** \copydoc core_util_atomic_compare_exchange_weak_explicit_u8 */ +MBED_FORCEINLINE bool core_util_atomic_compare_exchange_weak_explicit_ptr(void *volatile *ptr, void **expectedCurrentValue, void *desiredValue, mbed_memory_order success, mbed_memory_order failure); + + +/** + * Atomic load. + * @param valuePtr Target memory location. + * @return The loaded value. + */ +MBED_FORCEINLINE uint8_t core_util_atomic_load_u8(const volatile uint8_t *valuePtr); + +/** + * \copydoc core_util_atomic_load_u8 + * @param order memory ordering constraint + */ +MBED_FORCEINLINE uint8_t core_util_atomic_load_explicit_u8(const volatile uint8_t *valuePtr, mbed_memory_order order); + +/** \copydoc core_util_atomic_load_u8 */ +MBED_FORCEINLINE uint16_t core_util_atomic_load_u16(const volatile uint16_t *valuePtr); + +/** \copydoc core_util_atomic_load_explicit_u8 */ +MBED_FORCEINLINE uint16_t core_util_atomic_load_explicit_u16(const volatile uint16_t *valuePtr, mbed_memory_order order); + +/** \copydoc core_util_atomic_load_u8 */ +MBED_FORCEINLINE uint32_t core_util_atomic_load_u32(const volatile uint32_t *valuePtr); + +/** \copydoc core_util_atomic_load_explicit_u8 */ +MBED_FORCEINLINE uint32_t core_util_atomic_load_explicit_u32(const volatile uint32_t *valuePtr, mbed_memory_order order); + +/** \copydoc core_util_atomic_load_u8 */ +uint64_t core_util_atomic_load_u64(const volatile uint64_t *valuePtr); + +/** \copydoc core_util_atomic_load_explicit_u8 */ +MBED_FORCEINLINE uint64_t core_util_atomic_load_explicit_u64(const volatile uint64_t *valuePtr, mbed_memory_order order); + +/** \copydoc core_util_atomic_load_u8 */ +MBED_FORCEINLINE int8_t core_util_atomic_load_s8(const volatile int8_t *valuePtr); + +/** \copydoc core_util_atomic_load_explicit_u8 */ +MBED_FORCEINLINE int8_t core_util_atomic_load_explicit_s8(const volatile int8_t *valuePtr, mbed_memory_order order); + +/** \copydoc core_util_atomic_load_u8 */ +MBED_FORCEINLINE int16_t core_util_atomic_load_s16(const volatile int16_t *valuePtr); + +/** \copydoc core_util_atomic_load_explicit_u8 */ +MBED_FORCEINLINE int16_t core_util_atomic_load_explicit_s16(const volatile int16_t *valuePtr, mbed_memory_order order); + +/** \copydoc core_util_atomic_load_u8 */ +MBED_FORCEINLINE int32_t core_util_atomic_load_s32(const volatile int32_t *valuePtr); + +/** \copydoc core_util_atomic_load_explicit_u8 */ +MBED_FORCEINLINE int32_t core_util_atomic_load_explicit_s32(const volatile int32_t *valuePtr, mbed_memory_order order); + +/** \copydoc core_util_atomic_load_u8 */ 
+MBED_FORCEINLINE int64_t core_util_atomic_load_s64(const volatile int64_t *valuePtr); + +/** \copydoc core_util_atomic_load_u8 */ +MBED_FORCEINLINE int64_t core_util_atomic_load_explicit_s64(const volatile int64_t *valuePtr, MBED_UNUSED mbed_memory_order order); + +/** \copydoc core_util_atomic_load_u8 */ +MBED_FORCEINLINE bool core_util_atomic_load_bool(const volatile bool *valuePtr); + +/** \copydoc core_util_atomic_load_u8 */ +MBED_FORCEINLINE bool core_util_atomic_load_explicit_bool(const volatile bool *valuePtr, mbed_memory_order order); + +/** \copydoc core_util_atomic_load_u8 */ +MBED_FORCEINLINE void *core_util_atomic_load_ptr(void *const volatile *valuePtr); + +/** \copydoc core_util_atomic_load_u8 */ +MBED_FORCEINLINE void *core_util_atomic_load_explicit_ptr(void *const volatile *valuePtr, mbed_memory_order order); + +/** + * Atomic store. + * @param valuePtr Target memory location. + * @param desiredValue The value to store. + */ +MBED_FORCEINLINE void core_util_atomic_store_u8(volatile uint8_t *valuePtr, uint8_t desiredValue); + +/** + * \copydoc core_util_atomic_store_u8 + * @param order memory ordering constraint + */ +MBED_FORCEINLINE void core_util_atomic_store_explicit_u8(volatile uint8_t *valuePtr, uint8_t desiredValue, mbed_memory_order order); + +/** \copydoc core_util_atomic_store_u8 */ +MBED_FORCEINLINE void core_util_atomic_store_u16(volatile uint16_t *valuePtr, uint16_t desiredValue); + +/** \copydoc core_util_atomic_store_explicit_u8 */ +MBED_FORCEINLINE void core_util_atomic_store_explicit_u16(volatile uint16_t *valuePtr, uint16_t desiredValue, mbed_memory_order order); + +/** \copydoc core_util_atomic_store_u8 */ +MBED_FORCEINLINE void core_util_atomic_store_u32(volatile uint32_t *valuePtr, uint32_t desiredValue); + +/** \copydoc core_util_atomic_store_explicit_u8 */ +MBED_FORCEINLINE void core_util_atomic_store_explicit_u32(volatile uint32_t *valuePtr, uint32_t desiredValue, mbed_memory_order order); + +/** \copydoc core_util_atomic_store_u8 */ +void core_util_atomic_store_u64(volatile uint64_t *valuePtr, uint64_t desiredValue); + +/** \copydoc core_util_atomic_store_explicit_u8 */ +MBED_FORCEINLINE void core_util_atomic_store_explicit_u64(volatile uint64_t *valuePtr, uint64_t desiredValue, mbed_memory_order order); + +/** \copydoc core_util_atomic_store_u8 */ +MBED_FORCEINLINE void core_util_atomic_store_s8(volatile int8_t *valuePtr, int8_t desiredValue); + +/** \copydoc core_util_atomic_store_explicit_u8 */ +MBED_FORCEINLINE void core_util_atomic_store_explicit_s8(volatile int8_t *valuePtr, int8_t desiredValue, mbed_memory_order order); + +/** \copydoc core_util_atomic_store_u8 */ +MBED_FORCEINLINE void core_util_atomic_store_s16(volatile int16_t *valuePtr, int16_t desiredValue); + +/** \copydoc core_util_atomic_store_explicit_u8 */ +MBED_FORCEINLINE void core_util_atomic_store_explicit_s16(volatile int16_t *valuePtr, int16_t desiredValue, mbed_memory_order order); + +/** \copydoc core_util_atomic_store_u8 */ +MBED_FORCEINLINE void core_util_atomic_store_s32(volatile int32_t *valuePtr, int32_t desiredValue); + +/** \copydoc core_util_atomic_store_explicit_u8 */ +MBED_FORCEINLINE void core_util_atomic_store_explicit_s32(volatile int32_t *valuePtr, int32_t desiredValue, mbed_memory_order order); + +/** \copydoc core_util_atomic_store_u8 */ +MBED_FORCEINLINE void core_util_atomic_store_s64(volatile int64_t *valuePtr, int64_t desiredValue); + +/** \copydoc core_util_atomic_store_explicit_u8 */ +MBED_FORCEINLINE void core_util_atomic_store_explicit_s64(volatile 
int64_t *valuePtr, int64_t desiredValue, mbed_memory_order order); + +/** \copydoc core_util_atomic_store_u8 */ +MBED_FORCEINLINE void core_util_atomic_store_bool(volatile bool *valuePtr, bool desiredValue); + +/** \copydoc core_util_atomic_store_explicit_u8 */ +MBED_FORCEINLINE void core_util_atomic_store_explicit_bool(volatile bool *valuePtr, bool desiredValue, mbed_memory_order order); + +/** \copydoc core_util_atomic_store_u8 */ +MBED_FORCEINLINE void core_util_atomic_store_ptr(void *volatile *valuePtr, void *desiredValue); + +/** \copydoc core_util_atomic_store_explicit_u8 */ +MBED_FORCEINLINE void core_util_atomic_store_explicit_ptr(void *volatile *valuePtr, void *desiredValue, mbed_memory_order order); + +/** + * Atomic exchange. + * @param valuePtr Target memory location. + * @param desiredValue The value to store. + * @return The previous value. + */ +MBED_INLINE_IF_EX uint8_t core_util_atomic_exchange_u8(volatile uint8_t *valuePtr, uint8_t desiredValue); + +/** \copydoc core_util_atomic_exchange_u8 + * @param order memory ordering constraint + */ +MBED_FORCEINLINE uint8_t core_util_atomic_exchange_explicit_u8(volatile uint8_t *valuePtr, uint8_t desiredValue, mbed_memory_order order); + +/** \copydoc core_util_atomic_exchange_u8 */ +MBED_INLINE_IF_EX uint16_t core_util_atomic_exchange_u16(volatile uint16_t *valuePtr, uint16_t desiredValue); + +/** \copydoc core_util_atomic_exchange_explicit_u8 */ +MBED_FORCEINLINE uint16_t core_util_atomic_exchange_explicit_u16(volatile uint16_t *valuePtr, uint16_t desiredValue, mbed_memory_order order); + +/** \copydoc core_util_atomic_exchange_u8 */ +MBED_INLINE_IF_EX uint32_t core_util_atomic_exchange_u32(volatile uint32_t *valuePtr, uint32_t desiredValue); + +/** \copydoc core_util_atomic_exchange_explicit_u8 */ +MBED_FORCEINLINE uint32_t core_util_atomic_exchange_explicit_u32(volatile uint32_t *valuePtr, uint32_t desiredValue, mbed_memory_order order); + +/** \copydoc core_util_atomic_exchange_u8 */ +uint64_t core_util_atomic_exchange_u64(volatile uint64_t *valuePtr, uint64_t desiredValue); + +/** \copydoc core_util_atomic_exchange_explicit_u8 */ +MBED_FORCEINLINE uint64_t core_util_atomic_exchange_explicit_u64(volatile uint64_t *valuePtr, uint64_t desiredValue, mbed_memory_order order); + +/** \copydoc core_util_atomic_exchange_u8 */ +MBED_FORCEINLINE int8_t core_util_atomic_exchange_s8(volatile int8_t *valuePtr, int8_t desiredValue); + +/** \copydoc core_util_atomic_exchange_explicit_u8 */ +MBED_FORCEINLINE int8_t core_util_atomic_exchange_explicit_s8(volatile int8_t *valuePtr, int8_t desiredValue, mbed_memory_order order); + +/** \copydoc core_util_atomic_exchange_u8 */ +MBED_FORCEINLINE int16_t core_util_atomic_exchange_s16(volatile int16_t *valuePtr, int16_t desiredValue); + +/** \copydoc core_util_atomic_exchange_explicit_u8 */ +MBED_FORCEINLINE int16_t core_util_atomic_exchange_explicit_s16(volatile int16_t *valuePtr, int16_t desiredValue, mbed_memory_order order); + +/** \copydoc core_util_atomic_exchange_u8 */ +MBED_FORCEINLINE int32_t core_util_atomic_exchange_s32(volatile int32_t *valuePtr, int32_t desiredValue); + +/** \copydoc core_util_atomic_exchange_explicit_u8 */ +MBED_FORCEINLINE int32_t core_util_atomic_exchange_explicit_s32(volatile int32_t *valuePtr, int32_t desiredValue, mbed_memory_order order); + +/** \copydoc core_util_atomic_exchange_u8 */ +MBED_FORCEINLINE int64_t core_util_atomic_exchange_s64(volatile int64_t *valuePtr, int64_t desiredValue); + +/** \copydoc core_util_atomic_exchange_explicit_u8 */ 
+MBED_FORCEINLINE int64_t core_util_atomic_exchange_explicit_s64(volatile int64_t *valuePtr, int64_t desiredValue, mbed_memory_order order); + +/** \copydoc core_util_atomic_exchange_u8 */ +MBED_FORCEINLINE bool core_util_atomic_exchange_bool(volatile bool *valuePtr, bool desiredValue); + +/** \copydoc core_util_atomic_exchange_explicit_u8 */ +MBED_FORCEINLINE bool core_util_atomic_exchange_explicit_bool(volatile bool *valuePtr, bool desiredValue, mbed_memory_order order); + +/** \copydoc core_util_atomic_exchange_u8 */ +inline void *core_util_atomic_exchange_ptr(void *volatile *valuePtr, void *desiredValue); + +/** \copydoc core_util_atomic_exchange_explicit_u8 */ +MBED_FORCEINLINE void *core_util_atomic_exchange_explicit_ptr(void *volatile *valuePtr, void *desiredValue, mbed_memory_order order); + +/** + * Atomic increment. + * @param valuePtr Target memory location being incremented. + * @param delta The amount being incremented. + * @return The new incremented value. + */ +MBED_INLINE_IF_EX uint8_t core_util_atomic_incr_u8(volatile uint8_t *valuePtr, uint8_t delta); + +/** \copydoc core_util_atomic_incr_u8 */ +MBED_INLINE_IF_EX uint16_t core_util_atomic_incr_u16(volatile uint16_t *valuePtr, uint16_t delta); + +/** \copydoc core_util_atomic_incr_u8 */ +MBED_INLINE_IF_EX uint32_t core_util_atomic_incr_u32(volatile uint32_t *valuePtr, uint32_t delta); + +/** \copydoc core_util_atomic_incr_u8 */ +uint64_t core_util_atomic_incr_u64(volatile uint64_t *valuePtr, uint64_t delta); + +/** \copydoc core_util_atomic_incr_u8 */ +MBED_FORCEINLINE int8_t core_util_atomic_incr_s8(volatile int8_t *valuePtr, int8_t delta); + +/** \copydoc core_util_atomic_incr_u8 */ +MBED_FORCEINLINE int16_t core_util_atomic_incr_s16(volatile int16_t *valuePtr, int16_t delta); + +/** \copydoc core_util_atomic_incr_u8 */ +MBED_FORCEINLINE int32_t core_util_atomic_incr_s32(volatile int32_t *valuePtr, int32_t delta); + +/** \copydoc core_util_atomic_incr_u8 */ +MBED_FORCEINLINE int64_t core_util_atomic_incr_s64(volatile int64_t *valuePtr, int64_t delta); + +/** \copydoc core_util_atomic_incr_u8 */ +inline void *core_util_atomic_incr_ptr(void *volatile *valuePtr, ptrdiff_t delta); + +/** + * Atomic decrement. + * @param valuePtr Target memory location being decremented. + * @param delta The amount being decremented. + * @return The new decremented value. 
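 *
 * For example, a hypothetical reference-counted object could be released when
 * the returned (new) count reaches zero:
 * ~~~
 * if (core_util_atomic_decr_u32(&obj->refcount, 1) == 0) {
 *     free(obj);
 * }
 * ~~~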
+ */ +MBED_INLINE_IF_EX uint8_t core_util_atomic_decr_u8(volatile uint8_t *valuePtr, uint8_t delta); + +/** \copydoc core_util_atomic_decr_u8 */ +MBED_INLINE_IF_EX uint16_t core_util_atomic_decr_u16(volatile uint16_t *valuePtr, uint16_t delta); + +/** \copydoc core_util_atomic_decr_u8 */ +MBED_INLINE_IF_EX uint32_t core_util_atomic_decr_u32(volatile uint32_t *valuePtr, uint32_t delta); + +/** \copydoc core_util_atomic_decr_u8 */ +uint64_t core_util_atomic_decr_u64(volatile uint64_t *valuePtr, uint64_t delta); + +/** \copydoc core_util_atomic_decr_u8 */ +MBED_FORCEINLINE int8_t core_util_atomic_decr_s8(volatile int8_t *valuePtr, int8_t delta); + +/** \copydoc core_util_atomic_decr_u8 */ +MBED_FORCEINLINE int16_t core_util_atomic_decr_s16(volatile int16_t *valuePtr, int16_t delta); + +/** \copydoc core_util_atomic_decr_u8 */ +MBED_FORCEINLINE int32_t core_util_atomic_decr_s32(volatile int32_t *valuePtr, int32_t delta); + +/** \copydoc core_util_atomic_decr_u8 */ +MBED_FORCEINLINE int64_t core_util_atomic_decr_s64(volatile int64_t *valuePtr, int64_t delta); + +/** \copydoc core_util_atomic_decr_u8 */ +inline void *core_util_atomic_decr_ptr(void *volatile *valuePtr, ptrdiff_t delta); + +/** + * Atomic add. + * @param valuePtr Target memory location being modified. + * @param arg The argument for the addition. + * @return The original value. + */ +MBED_INLINE_IF_EX uint8_t core_util_atomic_fetch_add_u8(volatile uint8_t *valuePtr, uint8_t arg); + +/** \copydoc core_util_atomic_fetch_add_u8 + * @param order memory ordering constraint + */ +MBED_FORCEINLINE uint8_t core_util_atomic_fetch_add_explicit_u8(volatile uint8_t *valuePtr, uint8_t arg, mbed_memory_order order); + +/** \copydoc core_util_atomic_fetch_add_u8 */ +MBED_INLINE_IF_EX uint16_t core_util_atomic_fetch_add_u16(volatile uint16_t *valuePtr, uint16_t arg); + +/** \copydoc core_util_atomic_fetch_add_explicit_u8 */ +MBED_FORCEINLINE uint16_t core_util_atomic_fetch_add_explicit_u16(volatile uint16_t *valuePtr, uint16_t arg, mbed_memory_order order); + +/** \copydoc core_util_atomic_fetch_add_u8 */ +MBED_INLINE_IF_EX uint32_t core_util_atomic_fetch_add_u32(volatile uint32_t *valuePtr, uint32_t arg); + +/** \copydoc core_util_atomic_fetch_add_explicit_u8 */ +MBED_FORCEINLINE uint32_t core_util_atomic_fetch_add_explicit_u32(volatile uint32_t *valuePtr, uint32_t arg, mbed_memory_order order); + +/** \copydoc core_util_atomic_fetch_add_u8 */ +uint64_t core_util_atomic_fetch_add_u64(volatile uint64_t *valuePtr, uint64_t arg); + +/** \copydoc core_util_atomic_fetch_add_explicit_u8 */ +MBED_FORCEINLINE uint64_t core_util_atomic_fetch_add_explicit_u64(volatile uint64_t *valuePtr, uint64_t arg, mbed_memory_order order); + +/** \copydoc core_util_atomic_fetch_add_u8 */ +MBED_FORCEINLINE int8_t core_util_atomic_fetch_add_s8(volatile int8_t *valuePtr, int8_t arg); + +/** \copydoc core_util_atomic_fetch_add_explicit_u8 */ +MBED_FORCEINLINE int8_t core_util_atomic_fetch_add_explicit_s8(volatile int8_t *valuePtr, int8_t arg, mbed_memory_order order); + +/** \copydoc core_util_atomic_fetch_add_u8 */ +MBED_FORCEINLINE int16_t core_util_atomic_fetch_add_s16(volatile int16_t *valuePtr, int16_t arg); + +/** \copydoc core_util_atomic_fetch_add_explicit_u8 */ +MBED_FORCEINLINE int16_t core_util_atomic_fetch_add_explicit_s16(volatile int16_t *valuePtr, int16_t arg, mbed_memory_order order); + +/** \copydoc core_util_atomic_fetch_add_u8 */ +MBED_FORCEINLINE int32_t core_util_atomic_fetch_add_s32(volatile int32_t *valuePtr, int32_t arg); + +/** \copydoc 
core_util_atomic_fetch_add_explicit_u8 */ +MBED_FORCEINLINE int32_t core_util_atomic_fetch_add_explicit_s32(volatile int32_t *valuePtr, int32_t arg, mbed_memory_order order); + +/** \copydoc core_util_atomic_fetch_add_u8 */ +MBED_FORCEINLINE int64_t core_util_atomic_fetch_add_s64(volatile int64_t *valuePtr, int64_t arg); + +/** \copydoc core_util_atomic_fetch_add_explicit_u8 */ +MBED_FORCEINLINE int64_t core_util_atomic_fetch_add_explicit_s64(volatile int64_t *valuePtr, int64_t arg, mbed_memory_order order); + +/** \copydoc core_util_atomic_fetch_add_u8 */ +MBED_FORCEINLINE void *core_util_atomic_fetch_add_ptr(void *volatile *valuePtr, ptrdiff_t arg); + +/** \copydoc core_util_atomic_fetch_add_explicit_u8 */ +MBED_FORCEINLINE void *core_util_atomic_fetch_add_explicit_ptr(void *volatile *valuePtr, ptrdiff_t arg, mbed_memory_order order); + +/** + * Atomic subtract. + * @param valuePtr Target memory location being modified. + * @param arg The argument for the subtraction. + * @return The original value. + */ +MBED_INLINE_IF_EX uint8_t core_util_atomic_fetch_sub_u8(volatile uint8_t *valuePtr, uint8_t arg); + +/** \copydoc core_util_atomic_fetch_sub_u8 + * @param order memory ordering constraint + */ +MBED_FORCEINLINE uint8_t core_util_atomic_fetch_sub_explicit_u8(volatile uint8_t *valuePtr, uint8_t arg, mbed_memory_order order); + +/** \copydoc core_util_atomic_fetch_sub_u8 */ +MBED_INLINE_IF_EX uint16_t core_util_atomic_fetch_sub_u16(volatile uint16_t *valuePtr, uint16_t arg); + +/** \copydoc core_util_atomic_fetch_sub_explicit_u8 */ +MBED_FORCEINLINE uint16_t core_util_atomic_fetch_sub_explicit_u16(volatile uint16_t *valuePtr, uint16_t arg, mbed_memory_order order); + +/** \copydoc core_util_atomic_fetch_sub_u8 */ +MBED_INLINE_IF_EX uint32_t core_util_atomic_fetch_sub_u32(volatile uint32_t *valuePtr, uint32_t arg); + +/** \copydoc core_util_atomic_fetch_sub_explicit_u8 */ +MBED_FORCEINLINE uint32_t core_util_atomic_fetch_sub_explicit_u32(volatile uint32_t *valuePtr, uint32_t arg, mbed_memory_order order); + +/** \copydoc core_util_atomic_fetch_sub_u8 */ +uint64_t core_util_atomic_fetch_sub_u64(volatile uint64_t *valuePtr, uint64_t arg); + +/** \copydoc core_util_atomic_fetch_sub_explicit_u8 */ +MBED_FORCEINLINE uint64_t core_util_atomic_fetch_sub_explicit_u64(volatile uint64_t *valuePtr, uint64_t arg, mbed_memory_order order); + +/** \copydoc core_util_atomic_fetch_sub_u8 */ +MBED_FORCEINLINE int8_t core_util_atomic_fetch_sub_s8(volatile int8_t *valuePtr, int8_t arg); + +/** \copydoc core_util_atomic_fetch_sub_explicit_u8 */ +MBED_FORCEINLINE int8_t core_util_atomic_fetch_sub_explicit_s8(volatile int8_t *valuePtr, int8_t arg, mbed_memory_order order); + +/** \copydoc core_util_atomic_fetch_sub_u8 */ +MBED_FORCEINLINE int16_t core_util_atomic_fetch_sub_s16(volatile int16_t *valuePtr, int16_t arg); + +/** \copydoc core_util_atomic_fetch_sub_explicit_u8 */ +MBED_FORCEINLINE int16_t core_util_atomic_fetch_sub_explicit_s16(volatile int16_t *valuePtr, int16_t arg, mbed_memory_order order); + +/** \copydoc core_util_atomic_fetch_sub_u8 */ +MBED_FORCEINLINE int32_t core_util_atomic_fetch_sub_s32(volatile int32_t *valuePtr, int32_t arg); + +/** \copydoc core_util_atomic_fetch_sub_explicit_u8 */ +MBED_FORCEINLINE int32_t core_util_atomic_fetch_sub_explicit_s32(volatile int32_t *valuePtr, int32_t arg, mbed_memory_order order); + +/** \copydoc core_util_atomic_fetch_sub_u8 */ +MBED_FORCEINLINE int64_t core_util_atomic_fetch_sub_s64(volatile int64_t *valuePtr, int64_t arg); + +/** \copydoc 
core_util_atomic_fetch_sub_explicit_u8 */ +MBED_FORCEINLINE int64_t core_util_atomic_fetch_sub_explicit_s64(volatile int64_t *valuePtr, int64_t arg, mbed_memory_order order); + +/** \copydoc core_util_atomic_fetch_sub_u8 */ +MBED_FORCEINLINE void *core_util_atomic_fetch_sub_ptr(void *volatile *valuePtr, ptrdiff_t arg); + +/** \copydoc core_util_atomic_fetch_sub_explicit_u8 */ +MBED_FORCEINLINE void *core_util_atomic_fetch_sub_explicit_ptr(void *volatile *valuePtr, ptrdiff_t arg, mbed_memory_order order); + +/** + * Atomic bitwise and. + * @param valuePtr Target memory location being modified. + * @param arg The argument for the bitwise operation. + * @return The original value. + */ +MBED_INLINE_IF_EX uint8_t core_util_atomic_fetch_and_u8(volatile uint8_t *valuePtr, uint8_t arg); + +/** \copydoc core_util_atomic_fetch_and_u8 + * @param order memory ordering constraint + */ +MBED_FORCEINLINE uint8_t core_util_atomic_fetch_and_explicit_u8(volatile uint8_t *valuePtr, uint8_t arg, mbed_memory_order order); + +/** \copydoc core_util_atomic_fetch_and_u8 */ +MBED_INLINE_IF_EX uint16_t core_util_atomic_fetch_and_u16(volatile uint16_t *valuePtr, uint16_t arg); + +/** \copydoc core_util_atomic_fetch_and_explicit_u8 */ +MBED_FORCEINLINE uint16_t core_util_atomic_fetch_and_explicit_u16(volatile uint16_t *valuePtr, uint16_t arg, mbed_memory_order order); + +/** \copydoc core_util_atomic_fetch_and_u8 */ +MBED_INLINE_IF_EX uint32_t core_util_atomic_fetch_and_u32(volatile uint32_t *valuePtr, uint32_t arg); + +/** \copydoc core_util_atomic_fetch_and_explicit_u8 */ +MBED_FORCEINLINE uint32_t core_util_atomic_fetch_and_explicit_u32(volatile uint32_t *valuePtr, uint32_t arg, mbed_memory_order order); + +/** \copydoc core_util_atomic_fetch_and_u8 */ +uint64_t core_util_atomic_fetch_and_u64(volatile uint64_t *valuePtr, uint64_t arg); + +/** \copydoc core_util_atomic_fetch_and_explicit_u8 */ +MBED_FORCEINLINE uint64_t core_util_atomic_fetch_and_explicit_u64(volatile uint64_t *valuePtr, uint64_t arg, mbed_memory_order order); + +/** + * Atomic bitwise inclusive or. + * @param valuePtr Target memory location being modified. + * @param arg The argument for the bitwise operation. + * @return The original value. 
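 *
 * For example, a bit in a hypothetical shared flag word can be published with
 * a single bitwise or (fetch_and with the inverted mask would clear it):
 * ~~~
 * core_util_atomic_fetch_or_u32(&event_flags, 1u << 3);
 * ~~~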
+ */ +MBED_INLINE_IF_EX uint8_t core_util_atomic_fetch_or_u8(volatile uint8_t *valuePtr, uint8_t arg); + +/** \copydoc core_util_atomic_fetch_or_u8 + * @param order memory ordering constraint + */ +MBED_FORCEINLINE uint8_t core_util_atomic_fetch_or_explicit_u8(volatile uint8_t *valuePtr, uint8_t arg, mbed_memory_order order); + +/** \copydoc core_util_atomic_fetch_or_u8 */ +MBED_INLINE_IF_EX uint16_t core_util_atomic_fetch_or_u16(volatile uint16_t *valuePtr, uint16_t arg); + +/** \copydoc core_util_atomic_fetch_or_explicit_u8 */ +MBED_FORCEINLINE uint16_t core_util_atomic_fetch_or_explicit_u16(volatile uint16_t *valuePtr, uint16_t arg, mbed_memory_order order); + +/** \copydoc core_util_atomic_fetch_or_u8 */ +MBED_INLINE_IF_EX uint32_t core_util_atomic_fetch_or_u32(volatile uint32_t *valuePtr, uint32_t arg); + +/** \copydoc core_util_atomic_fetch_or_explicit_u8 */ +MBED_FORCEINLINE uint32_t core_util_atomic_fetch_or_explicit_u32(volatile uint32_t *valuePtr, uint32_t arg, mbed_memory_order order); + +/** \copydoc core_util_atomic_fetch_or_u8 */ +uint64_t core_util_atomic_fetch_or_u64(volatile uint64_t *valuePtr, uint64_t arg); + +/** \copydoc core_util_atomic_fetch_or_explicit_u8 */ +MBED_FORCEINLINE uint64_t core_util_atomic_fetch_or_explicit_u64(volatile uint64_t *valuePtr, uint64_t arg, mbed_memory_order order); + +/** + * Atomic bitwise exclusive or. + * @param valuePtr Target memory location being modified. + * @param arg The argument for the bitwise operation. + * @return The original value. + */ +MBED_INLINE_IF_EX uint8_t core_util_atomic_fetch_xor_u8(volatile uint8_t *valuePtr, uint8_t arg); + +/** \copydoc core_util_atomic_fetch_xor_u8 + * @param order memory ordering constraint + */ +MBED_FORCEINLINE uint8_t core_util_atomic_fetch_xor_explicit_u8(volatile uint8_t *valuePtr, uint8_t arg, mbed_memory_order order); + +/** \copydoc core_util_atomic_fetch_xor_u8 */ +MBED_INLINE_IF_EX uint16_t core_util_atomic_fetch_xor_u16(volatile uint16_t *valuePtr, uint16_t arg); + +/** \copydoc core_util_atomic_fetch_xor_explicit_u8 */ +MBED_FORCEINLINE uint16_t core_util_atomic_fetch_xor_explicit_u16(volatile uint16_t *valuePtr, uint16_t arg, mbed_memory_order order); + +/** \copydoc core_util_atomic_fetch_xor_u8 */ +MBED_INLINE_IF_EX uint32_t core_util_atomic_fetch_xor_u32(volatile uint32_t *valuePtr, uint32_t arg); + +/** \copydoc core_util_atomic_fetch_xor_explicit_u8 */ +MBED_FORCEINLINE uint32_t core_util_atomic_fetch_xor_explicit_u32(volatile uint32_t *valuePtr, uint32_t arg, mbed_memory_order order); + +/** \copydoc core_util_atomic_fetch_xor_u8 */ +uint64_t core_util_atomic_fetch_xor_u64(volatile uint64_t *valuePtr, uint64_t arg); + +/** \copydoc core_util_atomic_fetch_xor_explicit_u8 */ +MBED_FORCEINLINE uint64_t core_util_atomic_fetch_xor_explicit_u64(volatile uint64_t *valuePtr, uint64_t arg, mbed_memory_order order); + +#ifdef __cplusplus +} // extern "C" + +// For each operation, two overloaded templates: +// * one for non-pointer types, which has implementations based on the +// u8/u16/u32/u64/s8/s16/s32/s64/bool functions above. No base implementation. +// * one for any pointer type, generically implemented based on ptr function above. +// +// Templates use standard C/C++ naming - old incr/decr/cas forms are not provided. +// +// Note that C++ template selection somewhat inhibits the ease of use of these templates. +// Ambiguities arise with setting pointers to NULL, or adding constants to integers. 
+// It may be necessary to cast the argument or desired value to the correct type, or +// explicitly specify the type - e.g. core_util_atomic_store(&fh, NULL) or +// core_util_atomic_store(&val, (uint8_t)1). +// A proper mbed::Atomic class would solve the issue. + +/** \copydoc core_util_atomic_load_u8 */ +template<typename T> T core_util_atomic_load(const volatile T *valuePtr); +/** \copydoc core_util_atomic_store_u8 */ +template<typename T> void core_util_atomic_store(volatile T *valuePtr, T desiredValue); +/** \copydoc core_util_atomic_exchange_u8 */ +template<typename T> T core_util_atomic_exchange(volatile T *ptr, T desiredValue); +/** \copydoc core_util_atomic_cas_u8 */ +template<typename T> bool core_util_atomic_compare_exchange_strong(volatile T *ptr, T *expectedCurrentValue, T desiredValue); +/** \copydoc core_util_atomic_compare_exchange_weak_u8 */ +template<typename T> bool core_util_atomic_compare_exchange_weak(volatile T *ptr, T *expectedCurrentValue, T desiredValue); +/** \copydoc core_util_atomic_fetch_add_u8 */ +template<typename T> T core_util_atomic_fetch_add(volatile T *valuePtr, T arg); +/** \copydoc core_util_atomic_fetch_sub_u8 */ +template<typename T> T core_util_atomic_fetch_sub(volatile T *valuePtr, T arg); +/** \copydoc core_util_atomic_fetch_and_u8 */ +template<typename T> T core_util_atomic_fetch_and(volatile T *valuePtr, T arg); +/** \copydoc core_util_atomic_fetch_or_u8 */ +template<typename T> T core_util_atomic_fetch_or(volatile T *valuePtr, T arg); +/** \copydoc core_util_atomic_fetch_xor_u8 */ +template<typename T> T core_util_atomic_fetch_xor(volatile T *valuePtr, T arg); + +/** \copydoc core_util_atomic_load_explicit_u8 */ +template<typename T> T core_util_atomic_load_explicit(const volatile T *valuePtr, mbed_memory_order order); +/** \copydoc core_util_atomic_store_explicit_u8 */ +template<typename T> void core_util_atomic_store_explicit(volatile T *valuePtr, T desiredValue, mbed_memory_order order); +/** \copydoc core_util_atomic_exchange_explicit_u8 */ +template<typename T> T core_util_atomic_exchange_explicit(volatile T *ptr, T desiredValue, mbed_memory_order order); +/** \copydoc core_util_atomic_cas_explicit_u8 */ +template<typename T> bool core_util_atomic_compare_exchange_strong_explicit(volatile T *ptr, T *expectedCurrentValue, T desiredValue, mbed_memory_order success, mbed_memory_order failure); +/** \copydoc core_util_atomic_compare_exchange_weak_explicit_u8 */ +template<typename T> bool core_util_atomic_compare_exchange_weak_explicit(volatile T *ptr, T *expectedCurrentValue, T desiredValue, mbed_memory_order success, mbed_memory_order failure); +/** \copydoc core_util_atomic_fetch_add_explicit_u8 */ +template<typename T> T core_util_atomic_fetch_add_explicit(volatile T *valuePtr, T arg, mbed_memory_order order); +/** \copydoc core_util_atomic_fetch_sub_explicit_u8 */ +template<typename T> T core_util_atomic_fetch_sub_explicit(volatile T *valuePtr, T arg, mbed_memory_order order); +/** \copydoc core_util_atomic_fetch_and_explicit_u8 */ +template<typename T> T core_util_atomic_fetch_and_explicit(volatile T *valuePtr, T arg, mbed_memory_order order); +/** \copydoc core_util_atomic_fetch_or_explicit_u8 */ +template<typename T> T core_util_atomic_fetch_or_explicit(volatile T *valuePtr, T arg, mbed_memory_order order); +/** \copydoc core_util_atomic_fetch_xor_explicit_u8 */ +template<typename T> T core_util_atomic_fetch_xor_explicit(volatile T *valuePtr, T arg, mbed_memory_order order); + +/** \copydoc core_util_atomic_load_ptr */ +template<typename T> inline T *core_util_atomic_load(T *const volatile *valuePtr); +/** \copydoc core_util_atomic_store_ptr */ +template<typename T> inline void core_util_atomic_store(T *volatile *valuePtr, T *desiredValue); +/** \copydoc core_util_atomic_exchange_ptr */ +template<typename T> inline T *core_util_atomic_exchange(T *volatile *valuePtr, T 
*desiredValue); +/** \copydoc core_util_atomic_cas_ptr */ +template<typename T> inline bool core_util_atomic_compare_exchange_strong(T *volatile *ptr, T **expectedCurrentValue, T *desiredValue); +/** \copydoc core_util_atomic_compare_exchange_weak_ptr */ +template<typename T> inline bool core_util_atomic_compare_exchange_weak(T *volatile *ptr, T **expectedCurrentValue, T *desiredValue); +/** \copydoc core_util_atomic_fetch_add_ptr */ +template<typename T> inline T *core_util_atomic_fetch_add(T *volatile *valuePtr, ptrdiff_t arg); +/** \copydoc core_util_atomic_fetch_sub_ptr */ +template<typename T> inline T *core_util_atomic_fetch_sub(T *volatile *valuePtr, ptrdiff_t arg); + +/** \copydoc core_util_atomic_load_explicit_ptr */ +template<typename T> inline T *core_util_atomic_load_explicit(T *const volatile *valuePtr, mbed_memory_order order); +/** \copydoc core_util_atomic_store_explicit_ptr */ +template<typename T> inline void core_util_atomic_store_explicit(T *volatile *valuePtr, T *desiredValue, mbed_memory_order order); +/** \copydoc core_util_atomic_exchange_explicit_ptr */ +template<typename T> inline T *core_util_atomic_exchange_explicit(T *volatile *valuePtr, T *desiredValue, mbed_memory_order order); +/** \copydoc core_util_atomic_cas_explicit_ptr */ +template<typename T> inline bool core_util_atomic_compare_exchange_strong_explicit(T *volatile *ptr, T **expectedCurrentValue, T *desiredValue, mbed_memory_order success, mbed_memory_order failure); +/** \copydoc core_util_atomic_compare_exchange_weak_explicit_ptr */ +template<typename T> inline bool core_util_atomic_compare_exchange_weak_explicit(T *volatile *ptr, T **expectedCurrentValue, T *desiredValue, mbed_memory_order success, mbed_memory_order failure); +/** \copydoc core_util_atomic_fetch_add_explicit_ptr */ +template<typename T> inline T *core_util_atomic_fetch_add_explicit(T *volatile *valuePtr, ptrdiff_t arg, mbed_memory_order order); +/** \copydoc core_util_atomic_fetch_sub_explicit_ptr */ +template<typename T> inline T *core_util_atomic_fetch_sub_explicit(T *volatile *valuePtr, ptrdiff_t arg, mbed_memory_order order); + +#endif // __cplusplus + +/**@}*/ + +/**@}*/ + +/* Hide the implementation away */ +#include "platform/internal/mbed_atomic_impl.h" + +#endif // __MBED_UTIL_ATOMICL_H__ + + + diff --git a/platform/mbed_critical.c b/platform/mbed_critical.c index 8b847730c67..4fe7e6abff7 100644 --- a/platform/mbed_critical.c +++ b/platform/mbed_critical.c @@ -24,25 +24,6 @@ #include "platform/mbed_critical.h" #include "platform/mbed_toolchain.h" -// if __EXCLUSIVE_ACCESS rtx macro not defined, we need to get this via own-set architecture macros -#ifndef MBED_EXCLUSIVE_ACCESS -#ifndef __EXCLUSIVE_ACCESS -#if ((__ARM_ARCH_7M__ == 1U) || \ - (__ARM_ARCH_7EM__ == 1U) || \ - (__ARM_ARCH_8M_BASE__ == 1U) || \ - (__ARM_ARCH_8M_MAIN__ == 1U)) || \ - (__ARM_ARCH_7A__ == 1U) -#define MBED_EXCLUSIVE_ACCESS 1U -#elif (__ARM_ARCH_6M__ == 1U) -#define MBED_EXCLUSIVE_ACCESS 0U -#else -#error "Unknown architecture for exclusive access" -#endif -#else -#define MBED_EXCLUSIVE_ACCESS __EXCLUSIVE_ACCESS -#endif -#endif - static uint32_t critical_section_reentrancy_counter = 0; bool core_util_are_interrupts_enabled(void) @@ -99,413 +80,3 @@ void core_util_critical_section_exit(void) hal_critical_section_exit(); } } - -/* Inline bool implementations in the header use uint8_t versions to manipulate the bool */ -MBED_STATIC_ASSERT(sizeof(bool) == sizeof(uint8_t), "Surely bool is a byte"); - -#if MBED_EXCLUSIVE_ACCESS - -/* Supress __ldrex and __strex deprecated warnings - "#3731-D: intrinsic is deprecated" */ -#if defined (__CC_ARM) -#pragma diag_suppress 3731 -#endif - -bool 
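A short usage sketch makes the template-deduction caveat documented in the new header concrete. This is illustrative only, not part of the patch, and the variable names are invented; it only uses calls declared above:

    #include "platform/mbed_atomic.h"

    static volatile uint8_t refcount;
    static volatile uint8_t flags;
    static int *volatile head;

    void example(void)
    {
        /* A bare literal is an int, so T would deduce inconsistently - cast it */
        core_util_atomic_store(&refcount, (uint8_t)1);
        uint8_t before = core_util_atomic_fetch_add(&refcount, (uint8_t)1);   /* returns the value prior to the add */
        uint8_t old_flags = core_util_atomic_fetch_or(&flags, (uint8_t)0x01); /* likewise returns the old flag bits */
        (void)before;
        (void)old_flags;

        /* NULL does not deduce as int *, so cast it (or name the type explicitly) */
        core_util_atomic_store(&head, (int *)NULL);
    }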
core_util_atomic_flag_test_and_set(volatile core_util_atomic_flag *flagPtr) -{ - MBED_BARRIER(); - uint8_t currentValue; - do { - currentValue = __LDREXB(&flagPtr->_flag); - } while (__STREXB(true, &flagPtr->_flag)); - MBED_BARRIER(); - return currentValue; -} - -bool core_util_atomic_cas_u8(volatile uint8_t *ptr, uint8_t *expectedCurrentValue, uint8_t desiredValue) -{ - MBED_BARRIER(); - do { - uint8_t currentValue = __LDREXB(ptr); - if (currentValue != *expectedCurrentValue) { - *expectedCurrentValue = currentValue; - __CLREX(); - return false; - } - } while (__STREXB(desiredValue, ptr)); - MBED_BARRIER(); - return true; -} - -bool core_util_atomic_cas_u16(volatile uint16_t *ptr, uint16_t *expectedCurrentValue, uint16_t desiredValue) -{ - MBED_BARRIER(); - do { - uint16_t currentValue = __LDREXH(ptr); - if (currentValue != *expectedCurrentValue) { - *expectedCurrentValue = currentValue; - __CLREX(); - return false; - } - } while (__STREXH(desiredValue, ptr)); - MBED_BARRIER(); - return true; -} - - -bool core_util_atomic_cas_u32(volatile uint32_t *ptr, uint32_t *expectedCurrentValue, uint32_t desiredValue) -{ - MBED_BARRIER(); - do { - uint32_t currentValue = __LDREXW(ptr); - if (currentValue != *expectedCurrentValue) { - *expectedCurrentValue = currentValue; - __CLREX(); - return false; - } - } while (__STREXW(desiredValue, ptr)); - MBED_BARRIER(); - return true; -} - -uint8_t core_util_atomic_exchange_u8(volatile uint8_t *valuePtr, uint8_t desiredValue) -{ - MBED_BARRIER(); - uint8_t currentValue; - do { - currentValue = __LDREXB(valuePtr); - } while (__STREXB(desiredValue, valuePtr)); - MBED_BARRIER(); - return currentValue; -} - -uint16_t core_util_atomic_exchange_u16(volatile uint16_t *valuePtr, uint16_t desiredValue) -{ - MBED_BARRIER(); - uint16_t currentValue; - do { - currentValue = __LDREXH(valuePtr); - } while (__STREXH(desiredValue, valuePtr)); - MBED_BARRIER(); - return currentValue; -} - -uint32_t core_util_atomic_exchange_u32(volatile uint32_t *valuePtr, uint32_t desiredValue) -{ - MBED_BARRIER(); - uint32_t currentValue; - do { - currentValue = __LDREXW(valuePtr); - } while (__STREXW(desiredValue, valuePtr)); - MBED_BARRIER(); - return currentValue; -} - -uint8_t core_util_atomic_incr_u8(volatile uint8_t *valuePtr, uint8_t delta) -{ - MBED_BARRIER(); - uint8_t newValue; - do { - newValue = __LDREXB(valuePtr) + delta; - } while (__STREXB(newValue, valuePtr)); - MBED_BARRIER(); - return newValue; -} - -uint16_t core_util_atomic_incr_u16(volatile uint16_t *valuePtr, uint16_t delta) -{ - MBED_BARRIER(); - uint16_t newValue; - do { - newValue = __LDREXH(valuePtr) + delta; - } while (__STREXH(newValue, valuePtr)); - MBED_BARRIER(); - return newValue; -} - -uint32_t core_util_atomic_incr_u32(volatile uint32_t *valuePtr, uint32_t delta) -{ - MBED_BARRIER(); - uint32_t newValue; - do { - newValue = __LDREXW(valuePtr) + delta; - } while (__STREXW(newValue, valuePtr)); - MBED_BARRIER(); - return newValue; -} - - -uint8_t core_util_atomic_decr_u8(volatile uint8_t *valuePtr, uint8_t delta) -{ - MBED_BARRIER(); - uint8_t newValue; - do { - newValue = __LDREXB(valuePtr) - delta; - } while (__STREXB(newValue, valuePtr)); - MBED_BARRIER(); - return newValue; -} - -uint16_t core_util_atomic_decr_u16(volatile uint16_t *valuePtr, uint16_t delta) -{ - MBED_BARRIER(); - uint16_t newValue; - do { - newValue = __LDREXH(valuePtr) - delta; - } while (__STREXH(newValue, valuePtr)); - MBED_BARRIER(); - return newValue; -} - -uint32_t core_util_atomic_decr_u32(volatile uint32_t *valuePtr, uint32_t 
delta) -{ - MBED_BARRIER(); - uint32_t newValue; - do { - newValue = __LDREXW(valuePtr) - delta; - } while (__STREXW(newValue, valuePtr)); - MBED_BARRIER(); - return newValue; -} - -#else - -bool core_util_atomic_flag_test_and_set(volatile core_util_atomic_flag *flagPtr) -{ - core_util_critical_section_enter(); - uint8_t currentValue = flagPtr->_flag; - flagPtr->_flag = true; - core_util_critical_section_exit(); - return currentValue; -} - -bool core_util_atomic_cas_u8(volatile uint8_t *ptr, uint8_t *expectedCurrentValue, uint8_t desiredValue) -{ - bool success; - uint8_t currentValue; - core_util_critical_section_enter(); - currentValue = *ptr; - if (currentValue == *expectedCurrentValue) { - *ptr = desiredValue; - success = true; - } else { - *expectedCurrentValue = currentValue; - success = false; - } - core_util_critical_section_exit(); - return success; -} - -bool core_util_atomic_cas_u16(volatile uint16_t *ptr, uint16_t *expectedCurrentValue, uint16_t desiredValue) -{ - bool success; - uint16_t currentValue; - core_util_critical_section_enter(); - currentValue = *ptr; - if (currentValue == *expectedCurrentValue) { - *ptr = desiredValue; - success = true; - } else { - *expectedCurrentValue = currentValue; - success = false; - } - core_util_critical_section_exit(); - return success; -} - - -bool core_util_atomic_cas_u32(volatile uint32_t *ptr, uint32_t *expectedCurrentValue, uint32_t desiredValue) -{ - bool success; - uint32_t currentValue; - core_util_critical_section_enter(); - currentValue = *ptr; - if (currentValue == *expectedCurrentValue) { - *ptr = desiredValue; - success = true; - } else { - *expectedCurrentValue = currentValue; - success = false; - } - core_util_critical_section_exit(); - return success; -} - - -uint8_t core_util_atomic_exchange_u8(volatile uint8_t *ptr, uint8_t desiredValue) -{ - core_util_critical_section_enter(); - uint8_t currentValue = *ptr; - *ptr = desiredValue; - core_util_critical_section_exit(); - return currentValue; -} - -uint16_t core_util_atomic_exchange_u16(volatile uint16_t *ptr, uint16_t desiredValue) -{ - core_util_critical_section_enter(); - uint16_t currentValue = *ptr; - *ptr = desiredValue; - core_util_critical_section_exit(); - return currentValue; -} - -uint32_t core_util_atomic_exchange_u32(volatile uint32_t *ptr, uint32_t desiredValue) -{ - core_util_critical_section_enter(); - uint32_t currentValue = *ptr; - *ptr = desiredValue; - core_util_critical_section_exit(); - return currentValue; -} - - -uint8_t core_util_atomic_incr_u8(volatile uint8_t *valuePtr, uint8_t delta) -{ - uint8_t newValue; - core_util_critical_section_enter(); - newValue = *valuePtr + delta; - *valuePtr = newValue; - core_util_critical_section_exit(); - return newValue; -} - -uint16_t core_util_atomic_incr_u16(volatile uint16_t *valuePtr, uint16_t delta) -{ - uint16_t newValue; - core_util_critical_section_enter(); - newValue = *valuePtr + delta; - *valuePtr = newValue; - core_util_critical_section_exit(); - return newValue; -} - -uint32_t core_util_atomic_incr_u32(volatile uint32_t *valuePtr, uint32_t delta) -{ - uint32_t newValue; - core_util_critical_section_enter(); - newValue = *valuePtr + delta; - *valuePtr = newValue; - core_util_critical_section_exit(); - return newValue; -} - - -uint8_t core_util_atomic_decr_u8(volatile uint8_t *valuePtr, uint8_t delta) -{ - uint8_t newValue; - core_util_critical_section_enter(); - newValue = *valuePtr - delta; - *valuePtr = newValue; - core_util_critical_section_exit(); - return newValue; -} - -uint16_t 
core_util_atomic_decr_u16(volatile uint16_t *valuePtr, uint16_t delta) -{ - uint16_t newValue; - core_util_critical_section_enter(); - newValue = *valuePtr - delta; - *valuePtr = newValue; - core_util_critical_section_exit(); - return newValue; -} - -uint32_t core_util_atomic_decr_u32(volatile uint32_t *valuePtr, uint32_t delta) -{ - uint32_t newValue; - core_util_critical_section_enter(); - newValue = *valuePtr - delta; - *valuePtr = newValue; - core_util_critical_section_exit(); - return newValue; -} - -#endif - -/* No architecture we support has LDREXD/STREXD, so must always disable IRQs for 64-bit operations */ -uint64_t core_util_atomic_load_u64(const volatile uint64_t *valuePtr) -{ - core_util_critical_section_enter(); - uint64_t currentValue = *valuePtr; - core_util_critical_section_exit(); - return currentValue; -} - -void core_util_atomic_store_u64(volatile uint64_t *valuePtr, uint64_t desiredValue) -{ - core_util_critical_section_enter(); - *valuePtr = desiredValue; - core_util_critical_section_exit(); -} - -uint64_t core_util_atomic_exchange_u64(volatile uint64_t *valuePtr, uint64_t desiredValue) -{ - core_util_critical_section_enter(); - uint64_t currentValue = *valuePtr; - *valuePtr = desiredValue; - core_util_critical_section_exit(); - return currentValue; -} - -bool core_util_atomic_cas_u64(volatile uint64_t *ptr, uint64_t *expectedCurrentValue, uint64_t desiredValue) -{ - bool success; - uint64_t currentValue; - core_util_critical_section_enter(); - currentValue = *ptr; - if (currentValue == *expectedCurrentValue) { - *ptr = desiredValue; - success = true; - } else { - *expectedCurrentValue = currentValue; - success = false; - } - core_util_critical_section_exit(); - return success; -} - -uint64_t core_util_atomic_incr_u64(volatile uint64_t *valuePtr, uint64_t delta) -{ - uint64_t newValue; - core_util_critical_section_enter(); - newValue = *valuePtr + delta; - *valuePtr = newValue; - core_util_critical_section_exit(); - return newValue; -} - -uint64_t core_util_atomic_decr_u64(volatile uint64_t *valuePtr, uint64_t delta) -{ - uint64_t newValue; - core_util_critical_section_enter(); - newValue = *valuePtr - delta; - *valuePtr = newValue; - core_util_critical_section_exit(); - return newValue; -} - -MBED_STATIC_ASSERT(sizeof(void *) == sizeof(uint32_t), "Alas, pointers must be 32-bit"); - -bool core_util_atomic_cas_ptr(void *volatile *ptr, void **expectedCurrentValue, void *desiredValue) -{ - return core_util_atomic_cas_u32( - (volatile uint32_t *)ptr, - (uint32_t *)expectedCurrentValue, - (uint32_t)desiredValue); -} - -void *core_util_atomic_exchange_ptr(void *volatile *valuePtr, void *desiredValue) -{ - return (void *)core_util_atomic_exchange_u32((volatile uint32_t *)valuePtr, (uint32_t)desiredValue); -} - -void *core_util_atomic_incr_ptr(void *volatile *valuePtr, ptrdiff_t delta) -{ - return (void *)core_util_atomic_incr_u32((volatile uint32_t *)valuePtr, (uint32_t)delta); -} - -void *core_util_atomic_decr_ptr(void *volatile *valuePtr, ptrdiff_t delta) -{ - return (void *)core_util_atomic_decr_u32((volatile uint32_t *)valuePtr, (uint32_t)delta); -} - diff --git a/platform/mbed_critical.h b/platform/mbed_critical.h index 5de8de85dbe..b6e68f533c9 100644 --- a/platform/mbed_critical.h +++ b/platform/mbed_critical.h @@ -20,9 +20,6 @@ #define __MBED_UTIL_CRITICAL_H__ #include -#include -#include -#include "mbed_toolchain.h" #ifdef __cplusplus extern "C" { @@ -92,658 +89,11 @@ bool core_util_in_critical_section(void); /**@}*/ -/** - * \defgroup platform_atomic atomic functions 
- * - * Atomic functions function analogously to C11 and C++11 - loads have - * acquire semantics, stores have release semantics, and atomic operations - * are sequentially consistent. Atomicity is enforced both between threads and - * interrupt handlers. - * - * @{ - */ - -/** - * A lock-free, primitive atomic flag. - * - * Emulate C11's atomic_flag. The flag is initially in an indeterminate state - * unless explicitly initialized with CORE_UTIL_ATOMIC_FLAG_INIT. - */ -typedef struct core_util_atomic_flag { - uint8_t _flag; -} core_util_atomic_flag; - -/** - * Initializer for a core_util_atomic_flag. - * - * Example: - * ~~~ - * core_util_atomic_flag in_progress = CORE_UTIL_ATOMIC_FLAG_INIT; - * ~~~ - */ -#define CORE_UTIL_ATOMIC_FLAG_INIT { 0 } - -/** - * Atomic test and set. - * - * Atomically tests then sets the flag to true, returning the previous value. - * - * @param flagPtr Target flag being tested and set. - * @return The previous value. - */ -bool core_util_atomic_flag_test_and_set(volatile core_util_atomic_flag *flagPtr); - -/** - * Atomic clear. - * - * @param flagPtr Target flag being cleared. - */ -MBED_FORCEINLINE void core_util_atomic_flag_clear(volatile core_util_atomic_flag *flagPtr) -{ - MBED_BARRIER(); - flagPtr->_flag = false; - MBED_BARRIER(); -} - -/** - * Atomic compare and set. It compares the contents of a memory location to a - * given value and, only if they are the same, modifies the contents of that - * memory location to a given new value. This is done as a single atomic - * operation. The atomicity guarantees that the new value is calculated based on - * up-to-date information; if the value had been updated by another thread in - * the meantime, the write would fail due to a mismatched expectedCurrentValue. - * - * Refer to https://en.wikipedia.org/wiki/Compare-and-set [which may redirect - * you to the article on compare-and swap]. - * - * @param ptr The target memory location. - * @param[in,out] expectedCurrentValue A pointer to some location holding the - * expected current value of the data being set atomically. - * The computed 'desiredValue' should be a function of this current value. - * @note: This is an in-out parameter. In the - * failure case of atomic_cas (where the - * destination isn't set), the pointee of expectedCurrentValue is - * updated with the current value. - * @param[in] desiredValue The new value computed based on '*expectedCurrentValue'. - * - * @return true if the memory location was atomically - * updated with the desired value (after verifying - * that it contained the expectedCurrentValue), - * false otherwise. In the failure case, - * exepctedCurrentValue is updated with the new - * value of the target memory location. - * - * pseudocode: - * function cas(p : pointer to int, old : pointer to int, new : int) returns bool { - * if *p != *old { - * *old = *p - * return false - * } - * *p = new - * return true - * } - * - * @note: In the failure case (where the destination isn't set), the value - * pointed to by expectedCurrentValue is instead updated with the current value. - * This property helps writing concise code for the following incr: - * - * function incr(p : pointer to int, a : int) returns int { - * done = false - * value = *p // This fetch operation need not be atomic. 
- * while not done { - * done = atomic_cas(p, &value, value + a) // *value gets updated automatically until success - * } - * return value + a - * } - * - * @note: This corresponds to the C11 "atomic_compare_exchange_strong" - it - * always succeeds if the current value is expected, as per the pseudocode - * above; it will not spuriously fail as "atomic_compare_exchange_weak" may. - */ -bool core_util_atomic_cas_u8(volatile uint8_t *ptr, uint8_t *expectedCurrentValue, uint8_t desiredValue); - -/** \copydoc core_util_atomic_cas_u8 */ -bool core_util_atomic_cas_u16(volatile uint16_t *ptr, uint16_t *expectedCurrentValue, uint16_t desiredValue); - -/** \copydoc core_util_atomic_cas_u8 */ -bool core_util_atomic_cas_u32(volatile uint32_t *ptr, uint32_t *expectedCurrentValue, uint32_t desiredValue); - -/** \copydoc core_util_atomic_cas_u8 */ -bool core_util_atomic_cas_u64(volatile uint64_t *ptr, uint64_t *expectedCurrentValue, uint64_t desiredValue); - -/** \copydoc core_util_atomic_cas_u8 */ -MBED_FORCEINLINE int8_t core_util_atomic_cas_s8(volatile int8_t *ptr, int8_t *expectedCurrentValue, int8_t desiredValue) -{ - return (int8_t)core_util_atomic_cas_u8((volatile uint8_t *)ptr, (uint8_t *)expectedCurrentValue, (uint8_t)desiredValue); -} - -/** \copydoc core_util_atomic_cas_u8 */ -MBED_FORCEINLINE int16_t core_util_atomic_cas_s16(volatile int16_t *ptr, int16_t *expectedCurrentValue, int16_t desiredValue) -{ - return (int16_t)core_util_atomic_cas_u16((volatile uint16_t *)ptr, (uint16_t *)expectedCurrentValue, (uint16_t)desiredValue); -} -/** \copydoc core_util_atomic_cas_u8 */ -MBED_FORCEINLINE int32_t core_util_atomic_cas_s32(volatile int32_t *ptr, int32_t *expectedCurrentValue, int32_t desiredValue) -{ - return (int32_t)core_util_atomic_cas_u32((volatile uint32_t *)ptr, (uint32_t *)expectedCurrentValue, (uint32_t)desiredValue); -} - -/** \copydoc core_util_atomic_cas_u8 */ -MBED_FORCEINLINE int64_t core_util_atomic_cas_s64(volatile int64_t *ptr, int64_t *expectedCurrentValue, int64_t desiredValue) -{ - return (int64_t)core_util_atomic_cas_u64((volatile uint64_t *)ptr, (uint64_t *)expectedCurrentValue, (uint64_t)desiredValue); -} - -/** \copydoc core_util_atomic_cas_u8 */ -MBED_FORCEINLINE bool core_util_atomic_cas_bool(volatile bool *ptr, bool *expectedCurrentValue, bool desiredValue) -{ - return (bool)core_util_atomic_cas_u8((volatile uint8_t *)ptr, (uint8_t *)expectedCurrentValue, desiredValue); -} - -/** \copydoc core_util_atomic_cas_u8 */ -bool core_util_atomic_cas_ptr(void *volatile *ptr, void **expectedCurrentValue, void *desiredValue); - -/** - * Atomic load. - * @param valuePtr Target memory location. - * @return The loaded value. - */ -MBED_FORCEINLINE uint8_t core_util_atomic_load_u8(const volatile uint8_t *valuePtr) -{ - uint8_t value = *valuePtr; - MBED_BARRIER(); - return value; -} - -/** - * Atomic load. - * @param valuePtr Target memory location. - * @return The loaded value. - */ -MBED_FORCEINLINE uint16_t core_util_atomic_load_u16(const volatile uint16_t *valuePtr) -{ - uint16_t value = *valuePtr; - MBED_BARRIER(); - return value; -} - -/** - * Atomic load. - * @param valuePtr Target memory location. - * @return The loaded value. - */ -MBED_FORCEINLINE uint32_t core_util_atomic_load_u32(const volatile uint32_t *valuePtr) -{ - uint32_t value = *valuePtr; - MBED_BARRIER(); - return value; -} - -/** - * Atomic load. - * @param valuePtr Target memory location. - * @return The loaded value. 
- */ -uint64_t core_util_atomic_load_u64(const volatile uint64_t *valuePtr); - -/** - * Atomic load. - * @param valuePtr Target memory location. - * @return The loaded value. - */ -MBED_FORCEINLINE int8_t core_util_atomic_load_s8(const volatile int8_t *valuePtr) -{ - int8_t value = *valuePtr; - MBED_BARRIER(); - return value; -} - -/** - * Atomic load. - * @param valuePtr Target memory location. - * @return The loaded value. - */ -MBED_FORCEINLINE int16_t core_util_atomic_load_s16(const volatile int16_t *valuePtr) -{ - int16_t value = *valuePtr; - MBED_BARRIER(); - return value; -} - -/** - * Atomic load. - * @param valuePtr Target memory location. - * @return The loaded value. - */ -MBED_FORCEINLINE int32_t core_util_atomic_load_s32(const volatile int32_t *valuePtr) -{ - int32_t value = *valuePtr; - MBED_BARRIER(); - return value; -} - -/** - * Atomic load. - * @param valuePtr Target memory location. - * @return The loaded value. - */ -MBED_FORCEINLINE int64_t core_util_atomic_load_s64(const volatile int64_t *valuePtr) -{ - return (int64_t)core_util_atomic_load_u64((const volatile uint64_t *)valuePtr); -} - -/** - * Atomic load. - * @param valuePtr Target memory location. - * @return The loaded value. - */ -MBED_FORCEINLINE bool core_util_atomic_load_bool(const volatile bool *valuePtr) -{ - bool value = *valuePtr; - MBED_BARRIER(); - return value; -} - -/** - * Atomic load. - * @param valuePtr Target memory location. - * @return The loaded value. - */ -MBED_FORCEINLINE void *core_util_atomic_load_ptr(void *const volatile *valuePtr) -{ - void *value = *valuePtr; - MBED_BARRIER(); - return value; -} - -/** - * Atomic store. - * @param valuePtr Target memory location. - * @param desiredValue The value to store. - */ -MBED_FORCEINLINE void core_util_atomic_store_u8(volatile uint8_t *valuePtr, uint8_t desiredValue) -{ - MBED_BARRIER(); - *valuePtr = desiredValue; - MBED_BARRIER(); -} - -/** - * Atomic store. - * @param valuePtr Target memory location. - * @param desiredValue The value to store. - */ -MBED_FORCEINLINE void core_util_atomic_store_u16(volatile uint16_t *valuePtr, uint16_t desiredValue) -{ - MBED_BARRIER(); - *valuePtr = desiredValue; - MBED_BARRIER(); -} - -/** - * Atomic store. - * @param valuePtr Target memory location. - * @param desiredValue The value to store. - */ -MBED_FORCEINLINE void core_util_atomic_store_u32(volatile uint32_t *valuePtr, uint32_t desiredValue) -{ - MBED_BARRIER(); - *valuePtr = desiredValue; - MBED_BARRIER(); -} - -/** - * Atomic store. - * @param valuePtr Target memory location. - * @param desiredValue The value to store. - */ -void core_util_atomic_store_u64(volatile uint64_t *valuePtr, uint64_t desiredValue); - -/** - * Atomic store. - * @param valuePtr Target memory location. - * @param desiredValue The value to store. - */ -MBED_FORCEINLINE void core_util_atomic_store_s8(volatile int8_t *valuePtr, int8_t desiredValue) -{ - MBED_BARRIER(); - *valuePtr = desiredValue; - MBED_BARRIER(); -} - -/** - * Atomic store. - * @param valuePtr Target memory location. - * @param desiredValue The value to store. - */ -MBED_FORCEINLINE void core_util_atomic_store_s16(volatile int16_t *valuePtr, int16_t desiredValue) -{ - MBED_BARRIER(); - *valuePtr = desiredValue; - MBED_BARRIER(); -} - -/** - * Atomic store. - * @param valuePtr Target memory location. - * @param desiredValue The value to store. 
- */ -MBED_FORCEINLINE void core_util_atomic_store_s32(volatile int32_t *valuePtr, int32_t desiredValue) -{ - MBED_BARRIER(); - *valuePtr = desiredValue; - MBED_BARRIER(); -} - -/** - * Atomic store. - * @param valuePtr Target memory location. - * @param desiredValue The value to store. - */ -MBED_FORCEINLINE void core_util_atomic_store_s64(volatile int64_t *valuePtr, int64_t desiredValue) -{ - core_util_atomic_store_u64((volatile uint64_t *)valuePtr, (uint64_t)desiredValue); -} - -/** - * Atomic store. - * @param valuePtr Target memory location. - * @param desiredValue The value to store. - */ -MBED_FORCEINLINE void core_util_atomic_store_bool(volatile bool *valuePtr, bool desiredValue) -{ - MBED_BARRIER(); - *valuePtr = desiredValue; - MBED_BARRIER(); -} - -/** - * Atomic store. - * @param valuePtr Target memory location. - * @param desiredValue The value to store. - */ -MBED_FORCEINLINE void core_util_atomic_store_ptr(void *volatile *valuePtr, void *desiredValue) -{ - MBED_BARRIER(); - *valuePtr = desiredValue; - MBED_BARRIER(); -} - -/** - * Atomic exchange. - * @param valuePtr Target memory location. - * @param desiredValue The value to store. - * @return The previous value. - */ -uint8_t core_util_atomic_exchange_u8(volatile uint8_t *valuePtr, uint8_t desiredValue); - -/** - * Atomic exchange. - * @param valuePtr Target memory location. - * @param desiredValue The value to store. - * @return The previous value. - */ -uint16_t core_util_atomic_exchange_u16(volatile uint16_t *valuePtr, uint16_t desiredValue); - -/** - * Atomic exchange. - * @param valuePtr Target memory location. - * @param desiredValue The value to store. - * @return The previous value. - */ -uint32_t core_util_atomic_exchange_u32(volatile uint32_t *valuePtr, uint32_t desiredValue); - -/** - * Atomic exchange. - * @param valuePtr Target memory location. - * @param desiredValue The value to store. - * @return The previous value. - */ -uint64_t core_util_atomic_exchange_u64(volatile uint64_t *valuePtr, uint64_t desiredValue); - -/** - * Atomic exchange. - * @param valuePtr Target memory location. - * @param desiredValue The value to store. - * @return The previous value. - */ -MBED_FORCEINLINE int8_t core_util_atomic_exchange_s8(volatile int8_t *valuePtr, int8_t desiredValue) -{ - return (int8_t)core_util_atomic_exchange_u8((volatile uint8_t *)valuePtr, (uint8_t)desiredValue); -} - -/** - * Atomic exchange. - * @param valuePtr Target memory location. - * @param desiredValue The value to store. - * @return The previous value. - */ -MBED_FORCEINLINE int16_t core_util_atomic_exchange_s16(volatile int16_t *valuePtr, int16_t desiredValue) -{ - return (int16_t)core_util_atomic_exchange_u16((volatile uint16_t *)valuePtr, (uint16_t)desiredValue); -} - -/** - * Atomic exchange. - * @param valuePtr Target memory location. - * @param desiredValue The value to store. - * @return The previous value. - */ -MBED_FORCEINLINE int32_t core_util_atomic_exchange_s32(volatile int32_t *valuePtr, int32_t desiredValue) -{ - return (int32_t)core_util_atomic_exchange_u32((volatile uint32_t *)valuePtr, (uint32_t)desiredValue); -} - -/** - * Atomic exchange. - * @param valuePtr Target memory location. - * @param desiredValue The value to store. - * @return The previous value. - */ -MBED_FORCEINLINE int64_t core_util_atomic_exchange_s64(volatile int64_t *valuePtr, int64_t desiredValue) -{ - return (int64_t)core_util_atomic_exchange_u64((volatile uint64_t *)valuePtr, (uint64_t)desiredValue); -} - -/** - * Atomic exchange. 
- * @param valuePtr Target memory location. - * @param desiredValue The value to store. - * @return The previous value. - */ -MBED_FORCEINLINE bool core_util_atomic_exchange_bool(volatile bool *valuePtr, bool desiredValue) -{ - return (bool)core_util_atomic_exchange_u8((volatile uint8_t *)valuePtr, desiredValue); -} - -/** - * Atomic exchange. - * @param valuePtr Target memory location. - * @param desiredValue The value to store. - * @return The previous value. - */ -void *core_util_atomic_exchange_ptr(void *volatile *valuePtr, void *desiredValue); - -/** - * Atomic increment. - * @param valuePtr Target memory location being incremented. - * @param delta The amount being incremented. - * @return The new incremented value. - */ -uint8_t core_util_atomic_incr_u8(volatile uint8_t *valuePtr, uint8_t delta); - -/** - * Atomic increment. - * @param valuePtr Target memory location being incremented. - * @param delta The amount being incremented. - * @return The new incremented value. - */ -uint16_t core_util_atomic_incr_u16(volatile uint16_t *valuePtr, uint16_t delta); - -/** - * Atomic increment. - * @param valuePtr Target memory location being incremented. - * @param delta The amount being incremented. - * @return The new incremented value. - */ -uint32_t core_util_atomic_incr_u32(volatile uint32_t *valuePtr, uint32_t delta); - -/** - * Atomic increment. - * @param valuePtr Target memory location being incremented. - * @param delta The amount being incremented. - * @return The new incremented value. - */ -uint64_t core_util_atomic_incr_u64(volatile uint64_t *valuePtr, uint64_t delta); - -/** - * Atomic increment. - * @param valuePtr Target memory location being incremented. - * @param delta The amount being incremented. - * @return The new incremented value. - */ -MBED_FORCEINLINE int8_t core_util_atomic_incr_s8(volatile int8_t *valuePtr, int8_t delta) -{ - return (int8_t)core_util_atomic_incr_u8((volatile uint8_t *)valuePtr, (uint8_t)delta); -} - -/** - * Atomic increment. - * @param valuePtr Target memory location being incremented. - * @param delta The amount being incremented. - * @return The new incremented value. - */ -MBED_FORCEINLINE int16_t core_util_atomic_incr_s16(volatile int16_t *valuePtr, int16_t delta) -{ - return (int16_t)core_util_atomic_incr_u16((volatile uint16_t *)valuePtr, (uint16_t)delta); -} - -/** - * Atomic increment. - * @param valuePtr Target memory location being incremented. - * @param delta The amount being incremented. - * @return The new incremented value. - */ -MBED_FORCEINLINE int32_t core_util_atomic_incr_s32(volatile int32_t *valuePtr, int32_t delta) -{ - return (int32_t)core_util_atomic_incr_u32((volatile uint32_t *)valuePtr, (uint32_t)delta); -} - -/** - * Atomic increment. - * @param valuePtr Target memory location being incremented. - * @param delta The amount being incremented. - * @return The new incremented value. - */ -MBED_FORCEINLINE int64_t core_util_atomic_incr_s64(volatile int64_t *valuePtr, int64_t delta) -{ - return (int64_t)core_util_atomic_incr_u64((volatile uint64_t *)valuePtr, (uint64_t)delta); -} - -/** - * Atomic increment. - * @param valuePtr Target memory location being incremented. - * @param delta The amount being incremented in bytes. - * @return The new incremented value. - * - * @note The type of the pointer argument is not taken into account - * and the pointer is incremented by bytes. - */ -void *core_util_atomic_incr_ptr(void *volatile *valuePtr, ptrdiff_t delta); - -/** - * Atomic decrement. 
- * @param valuePtr Target memory location being decremented. - * @param delta The amount being decremented. - * @return The new decremented value. - */ -uint8_t core_util_atomic_decr_u8(volatile uint8_t *valuePtr, uint8_t delta); - -/** - * Atomic decrement. - * @param valuePtr Target memory location being decremented. - * @param delta The amount being decremented. - * @return The new decremented value. - */ -uint16_t core_util_atomic_decr_u16(volatile uint16_t *valuePtr, uint16_t delta); - -/** - * Atomic decrement. - * @param valuePtr Target memory location being decremented. - * @param delta The amount being decremented. - * @return The new decremented value. - */ -uint32_t core_util_atomic_decr_u32(volatile uint32_t *valuePtr, uint32_t delta); - -/** - * Atomic decrement. - * @param valuePtr Target memory location being decremented. - * @param delta The amount being decremented. - * @return The new decremented value. - */ -uint64_t core_util_atomic_decr_u64(volatile uint64_t *valuePtr, uint64_t delta); - -/** - * Atomic decrement. - * @param valuePtr Target memory location being decremented. - * @param delta The amount being decremented. - * @return The new decremented value. - */ -MBED_FORCEINLINE int8_t core_util_atomic_decr_s8(volatile int8_t *valuePtr, int8_t delta) -{ - return (int8_t)core_util_atomic_decr_u8((volatile uint8_t *)valuePtr, (uint8_t)delta); -} - -/** - * Atomic decrement. - * @param valuePtr Target memory location being decremented. - * @param delta The amount being decremented. - * @return The new decremented value. - */ -MBED_FORCEINLINE int16_t core_util_atomic_decr_s16(volatile int16_t *valuePtr, int16_t delta) -{ - return (int16_t)core_util_atomic_decr_u16((volatile uint16_t *)valuePtr, (uint16_t)delta); -} - -/** - * Atomic decrement. - * @param valuePtr Target memory location being decremented. - * @param delta The amount being decremented. - * @return The new decremented value. - */ -MBED_FORCEINLINE int32_t core_util_atomic_decr_s32(volatile int32_t *valuePtr, int32_t delta) -{ - return (int32_t)core_util_atomic_decr_u32((volatile uint32_t *)valuePtr, (uint32_t)delta); -} - -/** - * Atomic decrement. - * @param valuePtr Target memory location being decremented. - * @param delta The amount being decremented. - * @return The new decremented value. - */ -MBED_FORCEINLINE int64_t core_util_atomic_decr_s64(volatile int64_t *valuePtr, int64_t delta) -{ - return (int64_t)core_util_atomic_decr_u64((volatile uint64_t *)valuePtr, (uint64_t)delta); -} - -/** - * Atomic decrement. - * @param valuePtr Target memory location being decremented. - * @param delta The amount being decremented in bytes. - * @return The new decremented value. 
- * - * @note The type of the pointer argument is not taken into account - * and the pointer is decremented by bytes - */ -void *core_util_atomic_decr_ptr(void *volatile *valuePtr, ptrdiff_t delta); +/**@}*/ #ifdef __cplusplus } // extern "C" #endif -/**@}*/ - -/**@}*/ #endif // __MBED_UTIL_CRITICAL_H__ diff --git a/platform/mbed_error.c b/platform/mbed_error.c index ee5e93e6cc5..d30da62485e 100644 --- a/platform/mbed_error.c +++ b/platform/mbed_error.c @@ -20,6 +20,7 @@ #include "device.h" #include "platform/mbed_crash_data_offsets.h" #include "platform/mbed_retarget.h" +#include "platform/mbed_atomic.h" #include "platform/mbed_critical.h" #include "platform/mbed_error.h" #include "platform/mbed_error_hist.h" diff --git a/platform/mbed_retarget.cpp b/platform/mbed_retarget.cpp index 5a6a0413270..0ea946bae7d 100644 --- a/platform/mbed_retarget.cpp +++ b/platform/mbed_retarget.cpp @@ -27,6 +27,7 @@ #include "platform/PlatformMutex.h" #include "platform/mbed_error.h" #include "platform/mbed_stats.h" +#include "platform/mbed_atomic.h" #include "platform/mbed_critical.h" #include "platform/mbed_poll.h" #include "platform/PlatformMutex.h" diff --git a/platform/mbed_sleep_manager.c b/platform/mbed_sleep_manager.c index d9ae6a80289..a0a5c3d527a 100644 --- a/platform/mbed_sleep_manager.c +++ b/platform/mbed_sleep_manager.c @@ -17,6 +17,7 @@ #include "mbed_power_mgmt.h" #include "mbed_interface.h" +#include "mbed_atomic.h" #include "mbed_critical.h" #include "mbed_assert.h" #include "mbed_error.h" diff --git a/targets/TARGET_NORDIC/TARGET_NRF5x/TARGET_NRF52/serial_api.c b/targets/TARGET_NORDIC/TARGET_NRF5x/TARGET_NRF52/serial_api.c index 7849ecf6ce9..83d4232e949 100644 --- a/targets/TARGET_NORDIC/TARGET_NRF5x/TARGET_NRF52/serial_api.c +++ b/targets/TARGET_NORDIC/TARGET_NRF5x/TARGET_NRF52/serial_api.c @@ -50,6 +50,7 @@ #include "nrf_drv_gpiote.h" #include "PeripheralPins.h" +#include "platform/mbed_atomic.h" #include "platform/mbed_critical.h" #if UART0_ENABLED == 0 diff --git a/targets/TARGET_NUVOTON/TARGET_M2351/crypto/crypto-misc.cpp b/targets/TARGET_NUVOTON/TARGET_M2351/crypto/crypto-misc.cpp index eed26038368..e395dc614ff 100644 --- a/targets/TARGET_NUVOTON/TARGET_M2351/crypto/crypto-misc.cpp +++ b/targets/TARGET_NUVOTON/TARGET_M2351/crypto/crypto-misc.cpp @@ -17,6 +17,7 @@ #include "cmsis.h" #include "mbed_assert.h" +#include "mbed_atomic.h" #include "mbed_critical.h" #include "mbed_error.h" #include diff --git a/targets/TARGET_NUVOTON/TARGET_M480/crypto/crypto-misc.cpp b/targets/TARGET_NUVOTON/TARGET_M480/crypto/crypto-misc.cpp index 18f517a8443..f0e7a802013 100644 --- a/targets/TARGET_NUVOTON/TARGET_M480/crypto/crypto-misc.cpp +++ b/targets/TARGET_NUVOTON/TARGET_M480/crypto/crypto-misc.cpp @@ -17,6 +17,7 @@ #include "cmsis.h" #include "mbed_assert.h" +#include "mbed_atomic.h" #include "mbed_critical.h" #include "mbed_error.h" #include diff --git a/targets/TARGET_NUVOTON/TARGET_NUC472/crypto/crypto-misc.cpp b/targets/TARGET_NUVOTON/TARGET_NUC472/crypto/crypto-misc.cpp index e4fd9143c37..9a74a3ce20f 100644 --- a/targets/TARGET_NUVOTON/TARGET_NUC472/crypto/crypto-misc.cpp +++ b/targets/TARGET_NUVOTON/TARGET_NUC472/crypto/crypto-misc.cpp @@ -17,6 +17,7 @@ #include "cmsis.h" #include "mbed_assert.h" +#include "mbed_atomic.h" #include "mbed_critical.h" #include "mbed_error.h" #include diff --git a/targets/TARGET_NXP/TARGET_MCUXpresso_MCUS/TARGET_LPC55S69/flash_api.c b/targets/TARGET_NXP/TARGET_MCUXpresso_MCUS/TARGET_LPC55S69/flash_api.c index b3dacf084b9..b683063c1b7 100644 --- 
a/targets/TARGET_NXP/TARGET_MCUXpresso_MCUS/TARGET_LPC55S69/flash_api.c +++ b/targets/TARGET_NXP/TARGET_MCUXpresso_MCUS/TARGET_LPC55S69/flash_api.c @@ -15,6 +15,7 @@ */ #include "flash_api.h" +#include "mbed_toolchain.h" #include "mbed_critical.h" #if DEVICE_FLASH diff --git a/targets/TARGET_STM/trng_api.c b/targets/TARGET_STM/trng_api.c index 4f68b60defd..205598a4ee2 100644 --- a/targets/TARGET_STM/trng_api.c +++ b/targets/TARGET_STM/trng_api.c @@ -24,7 +24,7 @@ #include "cmsis.h" #include "trng_api.h" #include "mbed_error.h" -#include "mbed_critical.h" +#include "mbed_atomic.h" #if defined (TARGET_STM32WB) /* Family specific include for WB with HW semaphores */ #include "hw.h" diff --git a/usb/device/USBDevice/USBDevice.h b/usb/device/USBDevice/USBDevice.h index 4409bb33d10..01e25141411 100644 --- a/usb/device/USBDevice/USBDevice.h +++ b/usb/device/USBDevice/USBDevice.h @@ -18,6 +18,7 @@ #ifndef USBDEVICE_H #define USBDEVICE_H +#include #include "USBDevice_Types.h" #include "USBPhy.h" #include "mbed_critical.h" From 0fabf375646610f9265344b0a221c3b6799a4c23 Mon Sep 17 00:00:00 2001 From: Kevin Bracey Date: Wed, 27 Mar 2019 16:11:06 +0200 Subject: [PATCH 2/4] Add some atomic tests These are platform tests, but rely on the RTOS to run multiple threads to exercise it. (The atomics are still useful in non-RTOS, to protect against interrupt handlers, but testing versus other threads is easier. The implementation is the same either way, so doesn't seem worth testing non-RTOS specifically). --- TESTS/mbed_platform/atomic/main.cpp | 185 ++++++++++++++++++++++++++++ 1 file changed, 185 insertions(+) create mode 100644 TESTS/mbed_platform/atomic/main.cpp diff --git a/TESTS/mbed_platform/atomic/main.cpp b/TESTS/mbed_platform/atomic/main.cpp new file mode 100644 index 00000000000..3a2f99d0eb9 --- /dev/null +++ b/TESTS/mbed_platform/atomic/main.cpp @@ -0,0 +1,185 @@ +/* mbed Microcontroller Library + * Copyright (c) 2017 ARM Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#include "mbed.h" +#include "greentea-client/test_env.h" +#include "unity/unity.h" +#include "utest/utest.h" + +#if !MBED_CONF_RTOS_PRESENT +#error [NOT_SUPPORTED] test not supported +#endif + +#define THREAD_STACK 512 + +using utest::v1::Case; + + +namespace { + +/* Lock-free operations will be much faster - keep runtime down */ +#if MBED_ATOMIC_INT_LOCK_FREE +#define ADD_ITERATIONS (SystemCoreClock / 1000) +#else +#define ADD_ITERATIONS (SystemCoreClock / 8000) +#endif + +template +void add_incrementer(T *ptr) +{ + for (long i = ADD_ITERATIONS; i > 0; i--) { + core_util_atomic_fetch_add(ptr, T(1)); + } +} + +template +void add_release_incrementer(T *ptr) +{ + for (long i = ADD_ITERATIONS; i > 0; i--) { + core_util_atomic_fetch_add_explicit(ptr, T(1), mbed_memory_order_release); + } +} + +template +void sub_incrementer(T *ptr) +{ + for (long i = ADD_ITERATIONS; i > 0; i--) { + core_util_atomic_fetch_sub(ptr, T(-1)); + } +} + +template +void bitops_incrementer(T *ptr) +{ + for (long i = ADD_ITERATIONS; i > 0; i--) { + core_util_atomic_fetch_add(ptr, T(1)); + core_util_atomic_fetch_and(ptr, T(-1)); + core_util_atomic_fetch_or(ptr, T(0)); + } +} + +template +void weak_incrementer(T *ptr) +{ + for (long i = ADD_ITERATIONS; i > 0; i--) { + T val = core_util_atomic_load(ptr); + do { + } while (!core_util_atomic_compare_exchange_weak(ptr, &val, T(val + 1))); + } +} + +template +void strong_incrementer(T *ptr) +{ + for (long i = ADD_ITERATIONS; i > 0; i--) { + T val = core_util_atomic_load(ptr); + do { + } while (!core_util_atomic_compare_exchange_strong(ptr, &val, T(val + 1))); + } +} + + +/* + * Run multiple threads incrementing each data item + * ADD_ITERATIONS times, and at the end, check that + * each item is * ADD_ITERATIONS. + * Items are adjacent to catch any interference. + * + * Using core_util_atomic_ templates, and exercising + * load and store briefly. 
+ */ +template +void test_atomic_add() +{ + struct { + volatile T nonatomic1; + T atomic1; + T atomic2; + volatile T nonatomic2; + } data; + + data.nonatomic1 = 0; + core_util_atomic_store(&data.atomic1, T(0)); + core_util_atomic_store(&data.atomic2, T(0)); + data.nonatomic2 = 0; + + Thread t1(osPriorityNormal, THREAD_STACK); + Thread t2(osPriorityNormal, THREAD_STACK); + Thread t3(osPriorityNormal, THREAD_STACK); + Thread t4(osPriorityNormal, THREAD_STACK); + + TEST_ASSERT_EQUAL(osOK, t1.start(callback(Fn, &data.atomic1))); + TEST_ASSERT_EQUAL(osOK, t2.start(callback(Fn, &data.atomic1))); + TEST_ASSERT_EQUAL(osOK, t3.start(callback(Fn, &data.atomic2))); + TEST_ASSERT_EQUAL(osOK, t4.start(callback(Fn, &data.atomic2))); + + for (long i = ADD_ITERATIONS; i > 0; i--) { + data.nonatomic1++; + data.nonatomic2++; + } + + t1.join(); + t2.join(); + t3.join(); + t4.join(); + + TEST_ASSERT_EQUAL(T(ADD_ITERATIONS), data.nonatomic1); + TEST_ASSERT_EQUAL(T(2 * ADD_ITERATIONS), core_util_atomic_load(&data.atomic1)); + TEST_ASSERT_EQUAL(T(2 * ADD_ITERATIONS), core_util_atomic_load(&data.atomic2)); + TEST_ASSERT_EQUAL(T(ADD_ITERATIONS), data.nonatomic2); +} + +} // namespace + +utest::v1::status_t test_setup(const size_t number_of_cases) +{ + GREENTEA_SETUP(30, "default_auto"); + return utest::v1::verbose_test_setup_handler(number_of_cases); +} + +Case cases[] = { + Case("Test atomic add 8-bit", test_atomic_add), + Case("Test atomic add 16-bit", test_atomic_add), + Case("Test atomic add 32-bit", test_atomic_add), + Case("Test atomic add 64-bit", test_atomic_add), + Case("Test atomic add signed 8-bit", test_atomic_add), + Case("Test atomic add signed 16-bit", test_atomic_add), + Case("Test atomic add signed 32-bit", test_atomic_add), + Case("Test atomic add signed 64-bit", test_atomic_add), + Case("Test atomic add release 32-bit", test_atomic_add), + Case("Test atomic sub 8-bit", test_atomic_add), + Case("Test atomic sub 16-bit", test_atomic_add), + Case("Test atomic sub 32-bit", test_atomic_add), + Case("Test atomic sub 64-bit", test_atomic_add), + Case("Test atomic bitops 8-bit", test_atomic_add), + Case("Test atomic bitops 16-bit", test_atomic_add), + Case("Test atomic bitops 32-bit", test_atomic_add), + Case("Test atomic bitops 64-bit", test_atomic_add), + Case("Test atomic compare exchange weak 8-bit", test_atomic_add), + Case("Test atomic compare exchange weak 16-bit", test_atomic_add), + Case("Test atomic compare exchange weak 32-bit", test_atomic_add), + Case("Test atomic compare exchange weak 64-bit", test_atomic_add), + Case("Test atomic compare exchange strong 8-bit", test_atomic_add), + Case("Test atomic compare exchange strong 16-bit", test_atomic_add), + Case("Test atomic compare exchange strong 32-bit", test_atomic_add), + Case("Test atomic compare exchange strong 64-bit", test_atomic_add) +}; + +utest::v1::Specification specification(test_setup, cases); + +int main() +{ + return !utest::v1::Harness::run(specification); +} From 4fe06e0949f470e918cc84d56c45701082af5808 Mon Sep 17 00:00:00 2001 From: Kevin Bracey Date: Wed, 27 Mar 2019 16:14:27 +0200 Subject: [PATCH 3/4] Critical tests: use atomics Get rid of a volatile, and use atomics to synchronise with the interrupt routine instead. Useful as a non-RTOS basic compilation check for the atomics - the fuller atomic test relies on the RTOS. 
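The pattern this change applies is, in outline (a sketch under invented names such as on_timeout and take_timeout_event, not the actual diff below): the interrupt handler publishes the flag with an atomic store, and the test thread consumes it with an atomic exchange so the read and the clear cannot race.

    static bool callback_fired;   /* no longer volatile - only touched through the atomics */

    void on_timeout(void)         /* interrupt context */
    {
        core_util_atomic_store(&callback_fired, true);
    }

    bool take_timeout_event(void) /* thread context: read and clear in one step */
    {
        return core_util_atomic_exchange(&callback_fired, false);
    }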
--- TESTS/mbed_platform/critical_section/main.cpp | 35 +++++++++---------- 1 file changed, 16 insertions(+), 19 deletions(-) diff --git a/TESTS/mbed_platform/critical_section/main.cpp b/TESTS/mbed_platform/critical_section/main.cpp index 43ce94c4fcf..3a30f915e3d 100644 --- a/TESTS/mbed_platform/critical_section/main.cpp +++ b/TESTS/mbed_platform/critical_section/main.cpp @@ -22,11 +22,11 @@ using utest::v1::Case; -volatile bool callback_called; +bool callback_called; void tiemout_callback(void) { - callback_called = true; + core_util_atomic_store(&callback_called, true); } template @@ -49,7 +49,7 @@ void critical_section_raii_recursive(Timeout &timeout) wait_us(wait_time_us); } TEST_ASSERT_TRUE(core_util_in_critical_section()); - TEST_ASSERT_FALSE(callback_called); + TEST_ASSERT_FALSE(core_util_atomic_load(&callback_called)); } @@ -82,31 +82,30 @@ void test_C_API(void) TEST_ASSERT_FALSE(core_util_in_critical_section()); - callback_called = false; + core_util_atomic_store(&callback_called, false); timeout.attach_us(callback(tiemout_callback), timeout_time_us); wait_us(wait_time_us); - TEST_ASSERT_TRUE(callback_called); + TEST_ASSERT_TRUE(core_util_atomic_exchange(&callback_called, false)); for (int i = 0; i < N; i++) { core_util_critical_section_enter(); TEST_ASSERT_TRUE(core_util_in_critical_section()); } - callback_called = false; timeout.attach_us(callback(tiemout_callback), timeout_time_us); wait_us(wait_time_us); - TEST_ASSERT_FALSE(callback_called); + TEST_ASSERT_FALSE(core_util_atomic_load(&callback_called)); TEST_ASSERT_TRUE(core_util_in_critical_section()); for (int i = 0; i < N - 1; i++) { core_util_critical_section_exit(); TEST_ASSERT_TRUE(core_util_in_critical_section()); - TEST_ASSERT_FALSE(callback_called); + TEST_ASSERT_FALSE(core_util_atomic_load(&callback_called)); } core_util_critical_section_exit(); TEST_ASSERT_FALSE(core_util_in_critical_section()); - TEST_ASSERT_TRUE(callback_called); + TEST_ASSERT_TRUE(core_util_atomic_load(&callback_called)); } /** Template for tests @@ -138,16 +137,15 @@ void test_CPP_API_constructor_destructor(void) TEST_ASSERT_FALSE(core_util_in_critical_section()); - callback_called = false; + core_util_atomic_store(&callback_called, false); timeout.attach_us(callback(tiemout_callback), timeout_time_us); wait_us(wait_time_us); - TEST_ASSERT_TRUE(callback_called); + TEST_ASSERT_TRUE(core_util_atomic_exchange(&callback_called, false)); - callback_called = false; critical_section_raii_recursive(timeout); TEST_ASSERT_FALSE(core_util_in_critical_section()); - TEST_ASSERT_TRUE(callback_called); + TEST_ASSERT_TRUE(core_util_atomic_load(&callback_called)); } /** Template for tests @@ -179,31 +177,30 @@ void test_CPP_API_enable_disable(void) TEST_ASSERT_FALSE(core_util_in_critical_section()); - callback_called = false; + core_util_atomic_store(&callback_called, false); timeout.attach_us(callback(tiemout_callback), timeout_time_us); wait_us(wait_time_us); - TEST_ASSERT_TRUE(callback_called); + TEST_ASSERT_TRUE(core_util_atomic_exchange(&callback_called, false)); for (int i = 0; i < N; i++) { CriticalSectionLock::enable(); TEST_ASSERT_TRUE(core_util_in_critical_section()); } - callback_called = false; timeout.attach_us(callback(tiemout_callback), timeout_time_us); wait_us(wait_time_us); - TEST_ASSERT_FALSE(callback_called); + TEST_ASSERT_FALSE(core_util_atomic_load(&callback_called)); TEST_ASSERT_TRUE(core_util_in_critical_section()); for (int i = 0; i < N - 1; i++) { CriticalSectionLock::disable(); TEST_ASSERT_TRUE(core_util_in_critical_section()); - 
TEST_ASSERT_FALSE(callback_called); + TEST_ASSERT_FALSE(core_util_atomic_load(&callback_called)); } CriticalSectionLock::disable(); TEST_ASSERT_FALSE(core_util_in_critical_section()); - TEST_ASSERT_TRUE(callback_called); + TEST_ASSERT_TRUE(core_util_atomic_load(&callback_called)); } From 2bb40450ecfb3f43104ad5a5fda9b581882df84f Mon Sep 17 00:00:00 2001 From: Kevin Bracey Date: Fri, 29 Mar 2019 14:39:49 +0200 Subject: [PATCH 4/4] Add non-volatile atomic loads and stores Volatile makes no real difference when we're using assembler, or locked functions, but leaving it off could be more efficient for the basic loads and stores. So add non-volatile overloads in C++ for them. --- platform/internal/mbed_atomic_impl.h | 110 +++++++++++++++++++++++---- platform/mbed_atomic.h | 16 ++++ 2 files changed, 113 insertions(+), 13 deletions(-) diff --git a/platform/internal/mbed_atomic_impl.h b/platform/internal/mbed_atomic_impl.h index cbabd4b708d..43dcdd80a70 100644 --- a/platform/internal/mbed_atomic_impl.h +++ b/platform/internal/mbed_atomic_impl.h @@ -613,15 +613,15 @@ MBED_FORCEINLINE bool core_util_atomic_flag_test_and_set_explicit(volatile core_ /* Lock-free loads and stores don't need assembler - just aligned accesses */ /* Silly ordering of `T volatile` is because T can be `void *` */ -#define DO_MBED_LOCKFREE_LOADSTORE(T, fn_suffix) \ -MBED_FORCEINLINE T core_util_atomic_load_##fn_suffix(T const volatile *valuePtr) \ +#define DO_MBED_LOCKFREE_LOADSTORE(T, V, fn_suffix) \ +MBED_FORCEINLINE T core_util_atomic_load_##fn_suffix(T const V *valuePtr) \ { \ T value = *valuePtr; \ MBED_BARRIER(); \ return value; \ } \ \ -MBED_FORCEINLINE T core_util_atomic_load_explicit_##fn_suffix(T const volatile *valuePtr, mbed_memory_order order) \ +MBED_FORCEINLINE T core_util_atomic_load_explicit_##fn_suffix(T const V *valuePtr, mbed_memory_order order) \ { \ MBED_CHECK_LOAD_ORDER(order); \ T value = *valuePtr; \ @@ -629,14 +629,14 @@ MBED_FORCEINLINE T core_util_atomic_load_explicit_##fn_suffix(T const volatile * return value; \ } \ \ -MBED_FORCEINLINE void core_util_atomic_store_##fn_suffix(T volatile *valuePtr, T value) \ +MBED_FORCEINLINE void core_util_atomic_store_##fn_suffix(T V *valuePtr, T value) \ { \ MBED_BARRIER(); \ *valuePtr = value; \ MBED_BARRIER(); \ } \ \ -MBED_FORCEINLINE void core_util_atomic_store_explicit_##fn_suffix(T volatile *valuePtr, T value, mbed_memory_order order) \ +MBED_FORCEINLINE void core_util_atomic_store_explicit_##fn_suffix(T V *valuePtr, T value, mbed_memory_order order) \ { \ MBED_CHECK_STORE_ORDER(order); \ MBED_RELEASE_BARRIER(order); \ @@ -658,15 +658,51 @@ MBED_FORCEINLINE void core_util_atomic_flag_clear_explicit(volatile core_util_at flagPtr->_flag = false; MBED_SEQ_CST_BARRIER(order); } -DO_MBED_LOCKFREE_LOADSTORE(uint8_t, u8) -DO_MBED_LOCKFREE_LOADSTORE(uint16_t, u16) -DO_MBED_LOCKFREE_LOADSTORE(uint32_t, u32) -DO_MBED_LOCKFREE_LOADSTORE(int8_t, s8) -DO_MBED_LOCKFREE_LOADSTORE(int16_t, s16) -DO_MBED_LOCKFREE_LOADSTORE(int32_t, s32) -DO_MBED_LOCKFREE_LOADSTORE(bool, bool) -DO_MBED_LOCKFREE_LOADSTORE(void *, ptr) +#ifdef __cplusplus +// Temporarily turn off extern "C", so we can provide non-volatile load/store +// overloads for efficiency. All these functions are static inline, so this has +// no linkage effect exactly, it just permits the overloads. 
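The benefit of the non-volatile overloads is easiest to see from the caller's side. As an illustrative sketch (not part of the patch; Buffer and snapshot_used are invented), a field that is only ever shared through the atomic calls no longer has to be declared volatile, so code that owns the object exclusively keeps ordinary, fully optimisable accesses:

    struct Buffer {
        uint32_t used;   /* deliberately not volatile */
    };

    uint32_t snapshot_used(const Buffer *b)
    {
        /* Picks the new non-volatile load overload; no volatile
           qualification or cast is forced onto the caller. */
        return core_util_atomic_load(&b->used);
    }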
+} // extern "C" + +// For efficiency it's worth having non-volatile overloads +MBED_FORCEINLINE void core_util_atomic_flag_clear(core_util_atomic_flag *flagPtr) +{ + MBED_BARRIER(); + flagPtr->_flag = false; + MBED_BARRIER(); +} + +MBED_FORCEINLINE void core_util_atomic_flag_clear_explicit(core_util_atomic_flag *flagPtr, mbed_memory_order order) +{ + MBED_RELEASE_BARRIER(order); + flagPtr->_flag = false; + MBED_SEQ_CST_BARRIER(order); +} + +DO_MBED_LOCKFREE_LOADSTORE(uint8_t,, u8) +DO_MBED_LOCKFREE_LOADSTORE(uint16_t,, u16) +DO_MBED_LOCKFREE_LOADSTORE(uint32_t,, u32) +DO_MBED_LOCKFREE_LOADSTORE(int8_t,, s8) +DO_MBED_LOCKFREE_LOADSTORE(int16_t,, s16) +DO_MBED_LOCKFREE_LOADSTORE(int32_t,, s32) +DO_MBED_LOCKFREE_LOADSTORE(bool,, bool) +DO_MBED_LOCKFREE_LOADSTORE(void *,, ptr) + +#endif + +DO_MBED_LOCKFREE_LOADSTORE(uint8_t, volatile, u8) +DO_MBED_LOCKFREE_LOADSTORE(uint16_t, volatile, u16) +DO_MBED_LOCKFREE_LOADSTORE(uint32_t, volatile, u32) +DO_MBED_LOCKFREE_LOADSTORE(int8_t, volatile, s8) +DO_MBED_LOCKFREE_LOADSTORE(int16_t, volatile, s16) +DO_MBED_LOCKFREE_LOADSTORE(int32_t, volatile, s32) +DO_MBED_LOCKFREE_LOADSTORE(bool, volatile, bool) +DO_MBED_LOCKFREE_LOADSTORE(void *, volatile, ptr) + +#ifdef __cplusplus +extern "C" { +#endif /********************* GENERIC VARIANTS - SIGNED, BOOL, POINTERS ****************/ @@ -975,7 +1011,19 @@ inline T core_util_atomic_load(const volatile T *valuePtr) } \ \ template<> \ +inline T core_util_atomic_load(const T *valuePtr) \ +{ \ + return core_util_atomic_load_##fn_suffix(valuePtr); \ +} \ + \ +template<> \ inline T core_util_atomic_load_explicit(const volatile T *valuePtr, mbed_memory_order order) \ +{ \ + return core_util_atomic_load_explicit_##fn_suffix(valuePtr, order); \ +} \ + \ +template<> \ +inline T core_util_atomic_load_explicit(const T *valuePtr, mbed_memory_order order) \ { \ return core_util_atomic_load_explicit_##fn_suffix(valuePtr, order); \ } @@ -986,12 +1034,24 @@ inline T *core_util_atomic_load(T *const volatile *valuePtr) { return (T *) core_util_atomic_load_ptr((void *const volatile *) valuePtr); } +template<typename T> +inline T *core_util_atomic_load(T *const *valuePtr) +{ + return (T *) core_util_atomic_load_ptr((void *const *) valuePtr); +} + template<typename T> inline T *core_util_atomic_load_explicit(T *const volatile *valuePtr, mbed_memory_order order) { return (T *) core_util_atomic_load_explicit_ptr((void *const volatile *) valuePtr, order); } +template<typename T> +inline T *core_util_atomic_load_explicit(T *const *valuePtr, mbed_memory_order order) +{ + return (T *) core_util_atomic_load_explicit_ptr((void *const *) valuePtr, order); +} + DO_MBED_ATOMIC_LOAD_TEMPLATE(uint8_t, u8) DO_MBED_ATOMIC_LOAD_TEMPLATE(uint16_t, u16) DO_MBED_ATOMIC_LOAD_TEMPLATE(uint32_t, u32) @@ -1010,7 +1070,19 @@ inline void core_util_atomic_store(volatile T *valuePtr, T val) } \ \ template<> \ +inline void core_util_atomic_store(T *valuePtr, T val) \ +{ \ + core_util_atomic_store_##fn_suffix(valuePtr, val); \ +} \ + \ +template<> \ inline void core_util_atomic_store_explicit(volatile T *valuePtr, T val, mbed_memory_order order) \ +{ \ + core_util_atomic_store_explicit_##fn_suffix(valuePtr, val, order); \ +} \ + \ +template<> \ +inline void core_util_atomic_store_explicit(T *valuePtr, T val, mbed_memory_order order) \ { \ core_util_atomic_store_explicit_##fn_suffix(valuePtr, val, order); \ } @@ -1021,12 +1093,24 @@ inline void core_util_atomic_store(T *volatile *valuePtr, T *val) { core_util_atomic_store_ptr((void *volatile *) valuePtr, val); } +template<typename T> +inline void
core_util_atomic_store(T **valuePtr, T *val) +{ + core_util_atomic_store_ptr((void **) valuePtr, val); +} + template<typename T> inline void core_util_atomic_store_explicit(T *volatile *valuePtr, T *val, mbed_memory_order order) { core_util_atomic_store_ptr((void *volatile *) valuePtr, val, order); } +template<typename T> +inline void core_util_atomic_store_explicit(T **valuePtr, T *val, mbed_memory_order order) +{ + core_util_atomic_store_ptr((void **) valuePtr, val, order); +} + DO_MBED_ATOMIC_STORE_TEMPLATE(uint8_t, u8) DO_MBED_ATOMIC_STORE_TEMPLATE(uint16_t, u16) DO_MBED_ATOMIC_STORE_TEMPLATE(uint32_t, u32) diff --git a/platform/mbed_atomic.h b/platform/mbed_atomic.h index 4852c9bef26..ebdba7eb0a0 100644 --- a/platform/mbed_atomic.h +++ b/platform/mbed_atomic.h @@ -896,8 +896,12 @@ MBED_FORCEINLINE uint64_t core_util_atomic_fetch_xor_explicit_u64(volatile uint6 /** \copydoc core_util_atomic_load_u8 */ template<typename T> T core_util_atomic_load(const volatile T *valuePtr); +/** \copydoc core_util_atomic_load_u8 */ +template<typename T> T core_util_atomic_load(const T *valuePtr); /** \copydoc core_util_atomic_store_u8 */ template<typename T> void core_util_atomic_store(volatile T *valuePtr, T desiredValue); +/** \copydoc core_util_atomic_store_u8 */ +template<typename T> void core_util_atomic_store(T *valuePtr, T desiredValue); /** \copydoc core_util_atomic_exchange_u8 */ template<typename T> T core_util_atomic_exchange(volatile T *ptr, T desiredValue); /** \copydoc core_util_atomic_cas_u8 */ @@ -917,8 +921,12 @@ template<typename T> T core_util_atomic_fetch_xor(volatile T *valuePtr, T arg); /** \copydoc core_util_atomic_load_explicit_u8 */ template<typename T> T core_util_atomic_load_explicit(const volatile T *valuePtr, mbed_memory_order order); +/** \copydoc core_util_atomic_load_explicit_u8 */ +template<typename T> T core_util_atomic_load_explicit(const T *valuePtr, mbed_memory_order order); /** \copydoc core_util_atomic_store_explicit_u8 */ template<typename T> void core_util_atomic_store_explicit(volatile T *valuePtr, T desiredValue, mbed_memory_order order); +/** \copydoc core_util_atomic_store_explicit_u8 */ +template<typename T> void core_util_atomic_store_explicit(T *valuePtr, T desiredValue, mbed_memory_order order); /** \copydoc core_util_atomic_exchange_explicit_u8 */ template<typename T> T core_util_atomic_exchange_explicit(volatile T *ptr, T desiredValue, mbed_memory_order order); /** \copydoc core_util_atomic_cas_explicit_u8 */ @@ -938,8 +946,12 @@ template<typename T> T core_util_atomic_fetch_xor_explicit(volatile T *valuePtr, /** \copydoc core_util_atomic_load_ptr */ template<typename T> inline T *core_util_atomic_load(T *const volatile *valuePtr); +/** \copydoc core_util_atomic_load_ptr */ +template<typename T> inline T *core_util_atomic_load(T *const *valuePtr); /** \copydoc core_util_atomic_store_ptr */ template<typename T> inline void core_util_atomic_store(T *volatile *valuePtr, T *desiredValue); +/** \copydoc core_util_atomic_store_ptr */ +template<typename T> inline void core_util_atomic_store(T **valuePtr, T *desiredValue); /** \copydoc core_util_atomic_exchange_ptr */ template<typename T> inline T *core_util_atomic_exchange(T *volatile *valuePtr, T *desiredValue); /** \copydoc core_util_atomic_cas_ptr */ @@ -953,8 +965,12 @@ template<typename T> inline T *core_util_atomic_fetch_sub(T *volatile *valuePtr, /** \copydoc core_util_atomic_load_explicit_ptr */ template<typename T> inline T *core_util_atomic_load_explicit(T *const volatile *valuePtr, mbed_memory_order order); +/** \copydoc core_util_atomic_load_explicit_ptr */ +template<typename T> inline T *core_util_atomic_load_explicit(T *const *valuePtr, mbed_memory_order order); /** \copydoc core_util_atomic_store_explicit_ptr */ template<typename T> inline void
core_util_atomic_store_explicit(T *volatile *valuePtr, T *desiredValue, mbed_memory_order order); +/** \copydoc core_util_atomic_store_explicit_ptr */ +template<typename T> inline void core_util_atomic_store_explicit(T **valuePtr, T *desiredValue, mbed_memory_order order); /** \copydoc core_util_atomic_exchange_explicit_ptr */ template<typename T> inline T *core_util_atomic_exchange_explicit(T *volatile *valuePtr, T *desiredValue, mbed_memory_order order); /** \copydoc core_util_atomic_cas_explicit_ptr */
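
The patches above only touch mbed-os internals and its tests, so the following usage sketch may help reviewers see what the combined API looks like from application code. It is illustrative only and not part of any patch: it assumes an mbed-os tree with all four patches applied and the usual mbed-os include paths, and the `ready`/`counter` variables and `producer()`/`consumer()` functions are hypothetical names, not anything defined by mbed-os.

#include "platform/mbed_atomic.h"

static bool ready;                  // non-volatile: resolves to the new C++ overloads from PATCH 4/4
static volatile uint32_t counter;   // volatile: uses the original volatile-qualified functions

void producer(void)
{
    // fetch_add returns the old value, aligning with C++11 std::atomic
    (void) core_util_atomic_fetch_add_u32(&counter, 1);

    // release store pairs with the acquire load in consumer()
    core_util_atomic_store_explicit(&ready, true, mbed_memory_order_release);
}

bool consumer(void)
{
    if (!core_util_atomic_load_explicit(&ready, mbed_memory_order_acquire)) {
        return false;
    }
    // atomically test and clear the flag - the same pattern the reworked
    // critical_section test uses via core_util_atomic_exchange on callback_called
    return core_util_atomic_exchange(&ready, false);
}

Because `ready` is not declared volatile, the plain accesses elsewhere in the program can still be optimised freely; only the calls above act as atomic accesses with the requested ordering, which is the efficiency point the PATCH 4/4 commit message makes.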