Skip to content

Commit

Permalink
add __always_inline to trivial super low level inline functions
Browse files Browse the repository at this point in the history
  • Loading branch information
kilograham committed May 4, 2021
1 parent 18c3985 commit eb3fe22
Show file tree
Hide file tree
Showing 3 changed files with 27 additions and 27 deletions.
Original file line number Diff line number Diff line change
Expand Up @@ -84,7 +84,7 @@ typedef ioptr const const_ioptr;
* \param addr Address of writable register
* \param mask Bit-mask specifying bits to set
*/
__always_inline static void hw_set_bits(io_rw_32 *addr, uint32_t mask) {
    // Writing to the register's set-alias sets the bits in mask with a single
    // store (no read-modify-write on the CPU); other bits are unaffected.
    *(io_rw_32 *) hw_set_alias_untyped((volatile void *) addr) = mask;
}

Expand All @@ -94,7 +94,7 @@ inline static void hw_set_bits(io_rw_32 *addr, uint32_t mask) {
* \param addr Address of writable register
* \param mask Bit-mask specifying bits to clear
*/
__always_inline static void hw_clear_bits(io_rw_32 *addr, uint32_t mask) {
    // Writing to the register's clear-alias clears the bits in mask with a
    // single store (no read-modify-write on the CPU); other bits are unaffected.
    *(io_rw_32 *) hw_clear_alias_untyped((volatile void *) addr) = mask;
}

Expand All @@ -104,7 +104,7 @@ inline static void hw_clear_bits(io_rw_32 *addr, uint32_t mask) {
* \param addr Address of writable register
* \param mask Bit-mask specifying bits to invert
*/
__always_inline static void hw_xor_bits(io_rw_32 *addr, uint32_t mask) {
    // Writing to the register's xor-alias inverts the bits in mask with a
    // single store (no read-modify-write on the CPU); other bits are unaffected.
    *(io_rw_32 *) hw_xor_alias_untyped((volatile void *) addr) = mask;
}

Expand All @@ -120,7 +120,7 @@ inline static void hw_xor_bits(io_rw_32 *addr, uint32_t mask) {
* \param values Bits values
* \param write_mask Mask of bits to change
*/
__always_inline static void hw_write_masked(io_rw_32 *addr, uint32_t values, uint32_t write_mask) {
    // XOR in exactly the bits (within write_mask) that currently differ from
    // the desired values; bits outside write_mask are left unchanged.
    hw_xor_bits(addr, (*addr ^ values) & write_mask);
}

Expand Down
34 changes: 17 additions & 17 deletions src/rp2_common/hardware_sync/include/hardware/sync.h
Original file line number Diff line number Diff line change
Expand Up @@ -80,7 +80,7 @@ typedef volatile uint32_t spin_lock_t;
* The SEV (send event) instruction sends an event to both cores.
*/
__always_inline static void __sev(void) {
    // SEV (send event): signals an event to both cores.
    __asm volatile ("sev");
}

Expand All @@ -90,7 +90,7 @@ inline static void __sev(void) {
* The WFE (wait for event) instruction waits until one of a number of
* events occurs, including events signalled by the SEV instruction on either core.
*/
__always_inline static void __wfe(void) {
    // WFE (wait for event): blocks until an event occurs, including events
    // signalled by SEV on either core.
    __asm volatile ("wfe");
}

Expand All @@ -99,7 +99,7 @@ inline static void __wfe(void) {
*
* The WFI (wait for interrupt) instruction waits for a interrupt to wake up the core.
*/
__always_inline static void __wfi(void) {
    // WFI (wait for interrupt): blocks until an interrupt wakes the core.
    __asm volatile ("wfi");
}

Expand All @@ -109,7 +109,7 @@ inline static void __wfi(void) {
* The DMB (data memory barrier) acts as a memory barrier, all memory accesses prior to this
* instruction will be observed before any explicit access after the instruction.
*/
__always_inline static void __dmb(void) {
    // DMB (data memory barrier): memory accesses before this instruction are
    // observed before any explicit access after it. The "memory" clobber also
    // stops the compiler reordering accesses across the barrier.
    __asm volatile ("dmb" : : : "memory");
}

Expand All @@ -120,7 +120,7 @@ inline static void __dmb(void) {
* memory barrier (DMB). The DSB operation completes when all explicit memory
* accesses before this instruction complete.
*/
__always_inline static void __dsb(void) {
    // DSB (data synchronization barrier): completes only when all explicit
    // memory accesses before it have completed (stronger than DMB). The
    // "memory" clobber also prevents compiler reordering.
    __asm volatile ("dsb" : : : "memory");
}

Expand All @@ -131,14 +131,14 @@ inline static void __dsb(void) {
* so that all instructions following the ISB are fetched from cache or memory again, after
* the ISB instruction has been completed.
*/
__always_inline static void __isb(void) {
    // ISB (instruction synchronization barrier): flushes the pipeline so that
    // all following instructions are re-fetched after the barrier completes.
    __asm volatile ("isb");
}

/*! \brief Acquire a memory fence
* \ingroup hardware_sync
*/
inline static void __mem_fence_acquire(void) {
__always_inline static void __mem_fence_acquire(void) {
// the original code below makes it hard for us to be included from C++ via a header
// which itself is in an extern "C", so just use __dmb instead, which is what
// is required on Cortex M0+
Expand All @@ -154,7 +154,7 @@ inline static void __mem_fence_acquire(void) {
* \ingroup hardware_sync
*
*/
inline static void __mem_fence_release(void) {
__always_inline static void __mem_fence_release(void) {
// the original code below makes it hard for us to be included from C++ via a header
// which itself is in an extern "C", so just use __dmb instead, which is what
// is required on Cortex M0+
Expand All @@ -171,7 +171,7 @@ inline static void __mem_fence_release(void) {
*
* \return The prior interrupt enable status for restoration later via restore_interrupts()
*/
inline static uint32_t save_and_disable_interrupts(void) {
__always_inline static uint32_t save_and_disable_interrupts(void) {
uint32_t status;
__asm volatile ("mrs %0, PRIMASK" : "=r" (status)::);
__asm volatile ("cpsid i");
Expand All @@ -183,7 +183,7 @@ inline static uint32_t save_and_disable_interrupts(void) {
*
* \param status Previous interrupt status from save_and_disable_interrupts()
*/
__always_inline static void restore_interrupts(uint32_t status) {
    // Write the saved PRIMASK value back; interrupts are re-enabled only if
    // they were enabled when save_and_disable_interrupts() captured `status`.
    __asm volatile ("msr PRIMASK,%0"::"r" (status) : );
}

Expand All @@ -193,7 +193,7 @@ inline static void restore_interrupts(uint32_t status) {
* \param lock_num Spinlock ID
* \return The spinlock instance
*/
__always_inline static spin_lock_t *spin_lock_instance(uint lock_num) {
    invalid_params_if(SYNC, lock_num >= NUM_SPIN_LOCKS);
    // Spinlock registers are consecutive 32-bit words in SIO, starting at
    // SIO_SPINLOCK0_OFFSET, so lock N lives 4*N bytes past lock 0.
    return (spin_lock_t *) (SIO_BASE + SIO_SPINLOCK0_OFFSET + lock_num * 4);
}
Expand All @@ -204,7 +204,7 @@ inline static spin_lock_t *spin_lock_instance(uint lock_num) {
* \param lock The Spinlock instance
* \return The Spinlock ID
*/
inline static uint spin_lock_get_num(spin_lock_t *lock) {
__always_inline static uint spin_lock_get_num(spin_lock_t *lock) {
invalid_params_if(SYNC, (uint) lock < SIO_BASE + SIO_SPINLOCK0_OFFSET ||
(uint) lock >= NUM_SPIN_LOCKS * sizeof(spin_lock_t) + SIO_BASE + SIO_SPINLOCK0_OFFSET ||
((uint) lock - SIO_BASE + SIO_SPINLOCK0_OFFSET) % sizeof(spin_lock_t) != 0);
Expand All @@ -216,7 +216,7 @@ inline static uint spin_lock_get_num(spin_lock_t *lock) {
*
* \param lock Spinlock instance
*/
inline static void spin_lock_unsafe_blocking(spin_lock_t *lock) {
__always_inline static void spin_lock_unsafe_blocking(spin_lock_t *lock) {
// Note we don't do a wfe or anything, because by convention these spin_locks are VERY SHORT LIVED and NEVER BLOCK and run
// with INTERRUPTS disabled (to ensure that)... therefore nothing on our core could be blocking us, so we just need to wait on another core
// anyway which should be finished soon
Expand All @@ -229,7 +229,7 @@ inline static void spin_lock_unsafe_blocking(spin_lock_t *lock) {
*
* \param lock Spinlock instance
*/
__always_inline static void spin_unlock_unsafe(spin_lock_t *lock) {
    // Release fence first, so all writes made while the lock was held are
    // visible before the lock is observed as free by another core.
    __mem_fence_release();
    // Writing any value to the SIO spinlock register releases it.
    *lock = 0;
}
Expand All @@ -242,7 +242,7 @@ inline static void spin_unlock_unsafe(spin_lock_t *lock) {
* \param lock Spinlock instance
* \return interrupt status to be used when unlocking, to restore to original state
*/
__always_inline static uint32_t spin_lock_blocking(spin_lock_t *lock) {
    // Disable interrupts before taking the lock so nothing on this core can
    // preempt us while it is held; the caller must pass the returned state to
    // spin_unlock() to restore interrupts afterwards.
    uint32_t save = save_and_disable_interrupts();
    spin_lock_unsafe_blocking(lock);
    return save;
}
Expand Down Expand Up @@ -270,7 +270,7 @@ inline static bool is_spin_locked(spin_lock_t *lock) {
*
* \sa spin_lock_blocking()
*/
__always_inline static void spin_unlock(spin_lock_t *lock, uint32_t saved_irq) {
    // Release the lock first, then restore the interrupt state captured by
    // spin_lock_blocking().
    spin_unlock_unsafe(lock);
    restore_interrupts(saved_irq);
}
Expand All @@ -280,7 +280,7 @@ inline static void spin_unlock(spin_lock_t *lock, uint32_t saved_irq) {
*
* \return The core number the call was made from
*/
__always_inline static uint get_core_num(void) {
    // The SIO CPUID register reads as the number of the core executing the read.
    return (*(uint32_t *) (SIO_BASE + SIO_CPUID_OFFSET));
}

Expand Down
12 changes: 6 additions & 6 deletions src/rp2_common/pico_platform/include/pico/platform.h
Original file line number Diff line number Diff line change
Expand Up @@ -45,7 +45,7 @@ extern "C" {
* Decorates a function name, such that the function will execute from RAM, explicitly marking it as
* noinline to prevent it being inlined into a flash function by the compiler
*/
// Uses the SDK's __noinline decorator (rather than a raw GCC attribute) for
// consistency with the other function decorators in this header.
#define __no_inline_not_in_flash_func(func_name) __noinline __not_in_flash_func(func_name)

#define __packed_aligned __packed __aligned(4)

Expand All @@ -71,7 +71,7 @@ static inline void __breakpoint(void) {
/**
* Ensure that the compiler does not move memory access across this method call
*/
__always_inline static void __compiler_memory_barrier(void) {
    // Empty asm with a "memory" clobber: forbids the compiler from moving or
    // caching memory accesses across this point. Emits no hardware barrier
    // instruction — compiler-only ordering.
    __asm__ volatile ("" : : : "memory");
}

Expand Down Expand Up @@ -140,9 +140,9 @@ static inline void tight_loop_contents(void) {}
* \param b the second operand
* \return a * b
*/
__always_inline static int32_t __mul_instruction(int32_t a, int32_t b) {
    // Emit the MUL instruction directly; the "l" constraints require both
    // operands in low registers (r0-r7). "+l"(a) makes a both input and output,
    // matching MUL's in-place destination operand.
    asm ("mul %0, %1" : "+l" (a) : "l" (b) : );
    return a;
}

/**
Expand All @@ -167,7 +167,7 @@ return a;
* Get the current exception level on this core
* \return the exception number if the CPU is handling an exception, or 0 otherwise
*/
// Returns the exception number the CPU is currently handling, or 0 if none.
// (`extern` is redundant on a function declaration and is omitted.)
uint __get_current_exception(void);

#ifdef __cplusplus
}
Expand Down

0 comments on commit eb3fe22

Please sign in to comment.