From eb3fe22395a642f39ae33a82b1c6d939765979e6 Mon Sep 17 00:00:00 2001
From: graham sanderson
Date: Tue, 4 May 2021 17:00:44 -0500
Subject: [PATCH] add __always_inline to trivial super low level inline
 functions

---
 .../include/hardware/address_mapped.h     |  8 ++---
 .../hardware_sync/include/hardware/sync.h | 34 +++++++++----------
 .../pico_platform/include/pico/platform.h | 12 +++---
 3 files changed, 27 insertions(+), 27 deletions(-)

diff --git a/src/rp2_common/hardware_base/include/hardware/address_mapped.h b/src/rp2_common/hardware_base/include/hardware/address_mapped.h
index 1c1b29c61..46a774091 100644
--- a/src/rp2_common/hardware_base/include/hardware/address_mapped.h
+++ b/src/rp2_common/hardware_base/include/hardware/address_mapped.h
@@ -84,7 +84,7 @@ typedef ioptr const const_ioptr;
  * \param addr Address of writable register
  * \param mask Bit-mask specifying bits to set
  */
-inline static void hw_set_bits(io_rw_32 *addr, uint32_t mask) {
+__always_inline static void hw_set_bits(io_rw_32 *addr, uint32_t mask) {
     *(io_rw_32 *) hw_set_alias_untyped((volatile void *) addr) = mask;
 }
 
@@ -94,7 +94,7 @@ inline static void hw_set_bits(io_rw_32 *addr, uint32_t mask) {
  * \param addr Address of writable register
  * \param mask Bit-mask specifying bits to clear
  */
-inline static void hw_clear_bits(io_rw_32 *addr, uint32_t mask) {
+__always_inline static void hw_clear_bits(io_rw_32 *addr, uint32_t mask) {
     *(io_rw_32 *) hw_clear_alias_untyped((volatile void *) addr) = mask;
 }
 
@@ -104,7 +104,7 @@ inline static void hw_clear_bits(io_rw_32 *addr, uint32_t mask) {
  * \param addr Address of writable register
  * \param mask Bit-mask specifying bits to invert
  */
-inline static void hw_xor_bits(io_rw_32 *addr, uint32_t mask) {
+__always_inline static void hw_xor_bits(io_rw_32 *addr, uint32_t mask) {
     *(io_rw_32 *) hw_xor_alias_untyped((volatile void *) addr) = mask;
 }
 
@@ -120,7 +120,7 @@ inline static void hw_xor_bits(io_rw_32 *addr, uint32_t mask) {
  * \param values Bit values
  * \param write_mask Mask of bits to change
  */
-inline static void hw_write_masked(io_rw_32 *addr, uint32_t values, uint32_t write_mask) {
+__always_inline static void hw_write_masked(io_rw_32 *addr, uint32_t values, uint32_t write_mask) {
     hw_xor_bits(addr, (*addr ^ values) & write_mask);
 }
 
diff --git a/src/rp2_common/hardware_sync/include/hardware/sync.h b/src/rp2_common/hardware_sync/include/hardware/sync.h
index 549d1d723..a4332650e 100644
--- a/src/rp2_common/hardware_sync/include/hardware/sync.h
+++ b/src/rp2_common/hardware_sync/include/hardware/sync.h
@@ -80,7 +80,7 @@ typedef volatile uint32_t spin_lock_t;
  *
  * The SEV (send event) instruction sends an event to both cores.
  */
-inline static void __sev(void) {
+__always_inline static void __sev(void) {
     __asm volatile ("sev");
 }
 
@@ -90,7 +90,7 @@ inline static void __sev(void) {
  * The WFE (wait for event) instruction waits until one of a number of
  * events occurs, including events signalled by the SEV instruction on either core.
  */
-inline static void __wfe(void) {
+__always_inline static void __wfe(void) {
     __asm volatile ("wfe");
 }
 
@@ -99,7 +99,7 @@ inline static void __wfe(void) {
  *
  * The WFI (wait for interrupt) instruction waits for an interrupt to wake up the core.
 */
-inline static void __wfi(void) {
+__always_inline static void __wfi(void) {
     __asm volatile ("wfi");
 }
 
@@ -109,7 +109,7 @@ inline static void __wfi(void) {
  * The DMB (data memory barrier) acts as a memory barrier; all memory accesses prior to this
  * instruction will be observed before any explicit access after the instruction.
  */
-inline static void __dmb(void) {
+__always_inline static void __dmb(void) {
     __asm volatile ("dmb" : : : "memory");
 }
 
@@ -120,7 +120,7 @@ inline static void __dmb(void) {
  * memory barrier (DMB). The DSB operation completes when all explicit memory
  * accesses before this instruction complete.
  */
-inline static void __dsb(void) {
+__always_inline static void __dsb(void) {
     __asm volatile ("dsb" : : : "memory");
 }
 
@@ -131,14 +131,14 @@ inline static void __dsb(void) {
  * so that all instructions following the ISB are fetched from cache or memory again, after
  * the ISB instruction has been completed.
  */
-inline static void __isb(void) {
+__always_inline static void __isb(void) {
     __asm volatile ("isb");
 }
 
 /*! \brief Acquire a memory fence
  *  \ingroup hardware_sync
  */
-inline static void __mem_fence_acquire(void) {
+__always_inline static void __mem_fence_acquire(void) {
     // the original code below makes it hard for us to be included from C++ via a header
     // which itself is in an extern "C", so just use __dmb instead, which is what
     // is required on Cortex M0+
@@ -154,7 +154,7 @@ inline static void __mem_fence_acquire(void) {
  *  \ingroup hardware_sync
  *
  */
-inline static void __mem_fence_release(void) {
+__always_inline static void __mem_fence_release(void) {
     // the original code below makes it hard for us to be included from C++ via a header
     // which itself is in an extern "C", so just use __dmb instead, which is what
     // is required on Cortex M0+
@@ -171,7 +171,7 @@ inline static void __mem_fence_release(void) {
  *
  * \return The prior interrupt enable status for restoration later via restore_interrupts()
  */
-inline static uint32_t save_and_disable_interrupts(void) {
+__always_inline static uint32_t save_and_disable_interrupts(void) {
     uint32_t status;
     __asm volatile ("mrs %0, PRIMASK" : "=r" (status)::);
     __asm volatile ("cpsid i");
@@ -183,7 +183,7 @@
  *
  * \param status Previous interrupt status from save_and_disable_interrupts()
  */
-inline static void restore_interrupts(uint32_t status) {
+__always_inline static void restore_interrupts(uint32_t status) {
     __asm volatile ("msr PRIMASK,%0"::"r" (status) : );
 }
 
@@ -193,7 +193,7 @@
  * \param lock_num Spinlock ID
  * \return The spinlock instance
  */
-inline static spin_lock_t *spin_lock_instance(uint lock_num) {
+__always_inline static spin_lock_t *spin_lock_instance(uint lock_num) {
     invalid_params_if(SYNC, lock_num >= NUM_SPIN_LOCKS);
     return (spin_lock_t *) (SIO_BASE + SIO_SPINLOCK0_OFFSET + lock_num * 4);
 }
 
@@ -204,7 +204,7 @@
  * \param lock The Spinlock instance
  * \return The Spinlock ID
  */
-inline static uint spin_lock_get_num(spin_lock_t *lock) {
+__always_inline static uint spin_lock_get_num(spin_lock_t *lock) {
     invalid_params_if(SYNC, (uint) lock < SIO_BASE + SIO_SPINLOCK0_OFFSET ||
                             (uint) lock >= NUM_SPIN_LOCKS * sizeof(spin_lock_t) + SIO_BASE + SIO_SPINLOCK0_OFFSET ||
                             ((uint) lock - SIO_BASE + SIO_SPINLOCK0_OFFSET) % sizeof(spin_lock_t) != 0);
@@ -216,7 +216,7 @@
  *
  * \param lock Spinlock instance
 */
-inline static void spin_lock_unsafe_blocking(spin_lock_t *lock) {
+__always_inline static void spin_lock_unsafe_blocking(spin_lock_t *lock) {
     // Note we don't do a wfe or anything, because by convention these spin_locks are VERY SHORT LIVED and NEVER BLOCK and run
     // with INTERRUPTS disabled (to ensure that)... therefore nothing on our core could be blocking us, so we just need to wait
     // for the other core, which should be finished soon
@@ -229,7 +229,7 @@ inline static void spin_lock_unsafe_blocking(spin_lock_t *lock) {
  *
  * \param lock Spinlock instance
  */
-inline static void spin_unlock_unsafe(spin_lock_t *lock) {
+__always_inline static void spin_unlock_unsafe(spin_lock_t *lock) {
     __mem_fence_release();
     *lock = 0;
 }
 
@@ -242,7 +242,7 @@ inline static void spin_unlock_unsafe(spin_lock_t *lock) {
  * \param lock Spinlock instance
  * \return interrupt status to be used when unlocking, to restore to original state
  */
-inline static uint32_t spin_lock_blocking(spin_lock_t *lock) {
+__always_inline static uint32_t spin_lock_blocking(spin_lock_t *lock) {
     uint32_t save = save_and_disable_interrupts();
     spin_lock_unsafe_blocking(lock);
     return save;
@@ -270,7 +270,7 @@ inline static bool is_spin_locked(spin_lock_t *lock) {
  *
  * \sa spin_lock_blocking()
  */
-inline static void spin_unlock(spin_lock_t *lock, uint32_t saved_irq) {
+__always_inline static void spin_unlock(spin_lock_t *lock, uint32_t saved_irq) {
     spin_unlock_unsafe(lock);
     restore_interrupts(saved_irq);
 }
 
@@ -280,7 +280,7 @@ inline static void spin_unlock(spin_lock_t *lock, uint32_t saved_irq) {
  *
  * \return The core number the call was made from
  */
-static inline uint get_core_num(void) {
+__always_inline static uint get_core_num(void) {
     return (*(uint32_t *) (SIO_BASE + SIO_CPUID_OFFSET));
 }
 
diff --git a/src/rp2_common/pico_platform/include/pico/platform.h b/src/rp2_common/pico_platform/include/pico/platform.h
index e17dbca87..59c7424f0 100644
--- a/src/rp2_common/pico_platform/include/pico/platform.h
+++ b/src/rp2_common/pico_platform/include/pico/platform.h
@@ -45,7 +45,7 @@ extern "C" {
  * Decorates a function name, such that the function will execute from RAM, explicitly marking it as
  * noinline to prevent it being inlined into a flash function by the compiler
  */
-#define __no_inline_not_in_flash_func(func_name) __attribute__((noinline)) __not_in_flash_func(func_name)
+#define __no_inline_not_in_flash_func(func_name) __noinline __not_in_flash_func(func_name)
 
 #define __packed_aligned __packed __aligned(4)
 
@@ -71,7 +71,7 @@ static inline void __breakpoint(void) {
 /**
  * Ensure that the compiler does not move memory access across this method call
  */
-static inline void __compiler_memory_barrier(void) {
+__always_inline static void __compiler_memory_barrier(void) {
     __asm__ volatile ("" : : : "memory");
 }
 
@@ -140,9 +140,9 @@ static inline void tight_loop_contents(void) {}
  * \param b the second operand
  * \return a * b
  */
-inline static int32_t __mul_instruction(int32_t a, int32_t b) {
-asm ("mul %0, %1" : "+l" (a) : "l" (b) : );
-return a;
+__always_inline static int32_t __mul_instruction(int32_t a, int32_t b) {
+    asm ("mul %0, %1" : "+l" (a) : "l" (b) : );
+    return a;
 }
 
 /**
@@ -167,7 +167,7 @@
  * Get the current exception level on this core
  * \return the exception number if the CPU is handling an exception, or 0 otherwise
  */
-extern uint __get_current_exception(void);
+uint __get_current_exception(void);
 
 #ifdef __cplusplus
 }
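
A note on why this matters, with a usage sketch (not part of the patch itself): plain `inline` is only a hint, and at -O0/-Og GCC will often emit these helpers as real out-of-line functions, adding call/return overhead inside what are meant to be very short critical sections. The sketch below shows how the affected primitives compose; the helper name and the hard-coded lock number 0 are illustrative assumptions only, and real code would normally claim a free lock (e.g. with spin_lock_claim_unused()) rather than hard-coding one:

    #include "hardware/address_mapped.h"
    #include "hardware/sync.h"

    // Hypothetical helper: read-modify-write a field of a register shared
    // between cores. hw_write_masked() is not atomic by itself (it reads
    // *addr, XORs, then writes through the XOR alias), so cooperating code
    // wraps it in a hardware spin lock with interrupts disabled.
    static void update_reg_field(io_rw_32 *reg, uint32_t values, uint32_t mask) {
        spin_lock_t *lock = spin_lock_instance(0);      // lock 0 chosen arbitrarily here
        uint32_t saved_irq = spin_lock_blocking(lock);  // masks IRQs, then takes the lock
        hw_write_masked(reg, values, mask);             // RMW cannot be interleaved now
        spin_unlock(lock, saved_irq);                   // drops the lock, restores IRQs
    }

With __always_inline applied, every call in that sequence collapses into the caller even at low optimization levels, so the critical section stays a handful of straight-line instructions with no call overhead.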