-
Notifications
You must be signed in to change notification settings - Fork 967
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
Rework lock_core / timers #378
Changes from all commits
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
Original file line number | Diff line number | Diff line change |
---|---|---|
|
@@ -8,26 +8,183 @@ | |
#define _PICO_LOCK_CORE_H | ||
|
||
#include "pico.h" | ||
#include "pico/time.h" | ||
#include "hardware/sync.h" | ||
|
||
/** \file lock_core.h | ||
* \defgroup lock_core lock_core | ||
* \ingroup pico_sync | ||
* \brief base synchronization/lock primitive support | ||
* | ||
* Most of the pico_sync locking primitives contain a lock_core_t structure member. This currently just holds a spin | ||
* lock which is used only to protect the contents of the rest of the structure as part of implementing the synchronization | ||
* primitive. As such, the spin_lock member of lock core is never still held on return from any function for the primitive. | ||
* | ||
* \ref critical_section is an exceptional case in that it does not have a lock_core_t and simply wraps a spin lock, providing | ||
* methods to lock and unlock said spin lock. | ||
* | ||
* lock_core based structures work by locking the spin lock, checking state, and then deciding whether they additionally need to block | ||
* or notify when the spin lock is released. In the blocking case, they will wake up again in the future, and try the process again. | ||
* | ||
* By default the SDK just uses the processors' events via SEV and WEV for notification and blocking as these are sufficient for | ||
* cross core, and notification from interrupt handlers. However macros are defined in this file that abstract the wait | ||
* and notify mechanisms to allow the SDK locking functions to effectively be used within an RTOS or other environment. | ||
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. "RTOS on other environment" -> "RTOS or other environment" ? |
||
* | ||
* When implementing an RTOS, it is desirable for the SDK synchronization primitives that wait, to block the calling task (and immediately yield), | ||
* and those that notify, to wake a blocked task which isn't on processor. At least the wait macro implementation needs to be atomic with the protecting | ||
* spin_lock unlock from the caller's point of view; i.e. the task should unlock the spin lock as it starts its wait. Such implementation is | ||
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. "spin lock when as it starts" -> "spin lock when it starts" ? |
||
* up to the RTOS integration, however the macros are defined such that such operations are always combined into a single call | ||
* (so they can be performed atomically) even though the default implementation does not need this, as a WFE which starts | ||
* following the corresponding SEV is not missed. | ||
*/ | ||
|
||
// PICO_CONFIG: PARAM_ASSERTIONS_ENABLED_LOCK_CORE, Enable/disable assertions in the lock core, type=bool, default=0, group=pico_sync | ||
#ifndef PARAM_ASSERTIONS_ENABLED_LOCK_CORE | ||
#define PARAM_ASSERTIONS_ENABLED_LOCK_CORE 0 | ||
#endif | ||
|
||
/** \file lock_core.h | ||
* \ingroup pico_sync | ||
* \ingroup lock_core | ||
* | ||
* Base implementation for locking primitives protected by a spin lock | ||
* Base implementation for locking primitives protected by a spin lock. The spin lock is only used to protect | ||
* access to the remaining lock state (in primitives using lock_core); it is never left locked outside | ||
* of the function implementations | ||
*/ | ||
typedef struct lock_core { | ||
struct lock_core { | ||
// spin lock protecting this lock's state | ||
spin_lock_t *spin_lock; | ||
|
||
// note any lock members in containing structures need not be volatile; | ||
// they are protected by memory/compiler barriers when gaining and release spin locks | ||
} lock_core_t; | ||
}; | ||
|
||
typedef struct lock_core lock_core_t; | ||
|
||
/*! \brief Initialise a lock structure | ||
* \ingroup lock_core | ||
* | ||
* Initialize a lock structure, providing the spin lock number to use for protecting internal state. | ||
* | ||
* \param core Pointer to the lock_core to initialize | ||
* \param lock_num Spin lock number to use for the lock. As the spin lock is only used internally to the locking primitive | ||
* method implementations, this does not need to be globally unique, however could suffer contention | ||
*/ | ||
void lock_init(lock_core_t *core, uint lock_num); | ||
|
||
#ifndef lock_owner_id_t | ||
/*! \brief type to use to store the 'owner' of a lock. | ||
* \ingroup lock_core | ||
* By default this is int8_t as it only needs to store the core number or -1, however it may be | ||
* overridden if a larger type is required (e.g. for an RTOS task id) | ||
*/ | ||
#define lock_owner_id_t int8_t | ||
#endif | ||
|
||
#ifndef LOCK_INVALID_OWNER_ID | ||
/*! \brief marker value to use for a lock_owner_id_t which does not refer to any valid owner | ||
* \ingroup lock_core | ||
*/ | ||
#define LOCK_INVALID_OWNER_ID ((lock_owner_id_t)-1) | ||
#endif | ||
|
||
#ifndef lock_get_caller_owner_id | ||
/*! \brief return the owner id for the caller | ||
* \ingroup lock_core | ||
* By default this returns the calling core number, but may be overridden (e.g. to return an RTOS task id) | ||
*/ | ||
#define lock_get_caller_owner_id() ((lock_owner_id_t)get_core_num()) | ||
#endif | ||
|
||
#ifndef lock_internal_spin_unlock_with_wait | ||
/*! \brief Atomically unlock the lock's spin lock, and wait for a notification. | ||
* \ingroup lock_core | ||
* | ||
* _Atomic_ here refers to the fact that it should not be possible for a concurrent lock_internal_spin_unlock_with_notify | ||
* to insert itself between the spin unlock and this wait in a way that the wait does not see the notification (i.e. causing | ||
* a missed notification). In other words this method should always wake up in response to a lock_internal_spin_unlock_with_notify | ||
* for the same lock, which completes after this call starts. | ||
* | ||
* In an ideal implementation, this method would return exactly after the corresponding lock_internal_spin_unlock_with_notify | ||
* has subsequently been called on the same lock instance, however this method is free to return at _any_ point before that; | ||
* this macro is _always_ used in a loop which locks the spin lock, checks the internal locking primitive state and then | ||
* waits again if the calling thread should not proceed. | ||
* | ||
* By default this macro simply unlocks the spin lock, and then performs a WFE, but may be overridden | ||
* (e.g. to actually block the RTOS task). | ||
Comment on lines
+107
to
+113
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. Mixture of using "this method" and "this macro" (although I dunno if this difference is significant?) |
||
* | ||
* \param lock the lock_core for the primitive which needs to block | ||
* \param save the uint32_t value that should be passed to spin_unlock when the spin lock is unlocked. (i.e. the `PRIMASK` | ||
* state when the spin lock was acquired) | ||
*/ | ||
#define lock_internal_spin_unlock_with_wait(lock, save) spin_unlock((lock)->spin_lock, save), __wfe() | ||
#endif | ||
|
||
#ifndef lock_internal_spin_unlock_with_notify | ||
/*! \brief Atomically unlock the lock's spin lock, and send a notification | ||
* \ingroup lock_core | ||
* | ||
* _Atomic_ here refers to the fact that it should not be possible for this notification to happen during a | ||
* lock_internal_spin_unlock_with_wait in a way that that wait does not see the notification (i.e. causing | ||
* a missed notification). In other words this method should always wake up any lock_internal_spin_unlock_with_wait | ||
* which started before this call completes. | ||
* | ||
* In an ideal implementation, this method would wake up only the corresponding lock_internal_spin_unlock_with_wait | ||
* that has been called on the same lock instance, however it is free to wake up any of them, as they will check | ||
* their condition and then re-wait if necessary. | ||
* | ||
* By default this macro simply unlocks the spin lock, and then performs a SEV, but may be overridden | ||
* (e.g. to actually un-block RTOS task(s)). | ||
* | ||
* \param lock the lock_core for the primitive which needs to block | ||
* \param save the uint32_t value that should be passed to spin_unlock when the spin lock is unlocked. (i.e. the PRIMASK | ||
* state when the spin lock was acquired) | ||
*/ | ||
#define lock_internal_spin_unlock_with_notify(lock, save) spin_unlock((lock)->spin_lock, save), __sev() | ||
#endif | ||
|
||
#ifndef lock_internal_spin_unlock_with_best_effort_wait_or_timeout | ||
/*! \brief Atomically unlock the lock's spin lock, and wait for a notification or a timeout | ||
* \ingroup lock_core | ||
* | ||
* _Atomic_ here refers to the fact that it should not be possible for a concurrent lock_internal_spin_unlock_with_notify | ||
* to insert itself between the spin unlock and this wait in a way that the wait does not see the notification (i.e. causing | ||
* a missed notification). In other words this method should always wake up in response to a lock_internal_spin_unlock_with_notify | ||
* for the same lock, which completes after this call starts. | ||
* | ||
* In an ideal implementation, this method would return exactly after the corresponding lock_internal_spin_unlock_with_notify | ||
* has subsequently been called on the same lock instance or the timeout has been reached, however this method is free to return | ||
* at _any_ point before that; this macro is _always_ used in a loop which locks the spin lock, checks the internal locking | ||
* primitive state and then waits again if the calling thread should not proceed. | ||
* | ||
* By default this simply unlocks the spin lock, and then calls \ref best_effort_wfe_or_timeout | ||
* but may be overridden (e.g. to actually block the RTOS task with a timeout). | ||
* | ||
* \param lock the lock_core for the primitive which needs to block | ||
* \param save the uint32_t value that should be passed to spin_unlock when the spin lock is unlocked. (i.e. the PRIMASK | ||
* state when the spin lock was acquired) | ||
* \param until the \ref absolute_time_t value | ||
* \return true if the timeout has been reached | ||
*/ | ||
#define lock_internal_spin_unlock_with_best_effort_wait_or_timeout(lock, save, until) ({ \ | ||
spin_unlock((lock)->spin_lock, save); \ | ||
best_effort_wfe_or_timeout(until); \ | ||
}) | ||
#endif | ||
|
||
#ifndef sync_internal_yield_until_before | ||
/*! \brief yield to other processing until some time before the requested time | ||
* \ingroup lock_core | ||
* | ||
* This method is provided for cases where the caller has no useful work to do | ||
* until the specified time. | ||
* | ||
* By default this method does nothing, however it can be overridden (for example by an | ||
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. "if can" -> "it can" |
||
* RTOS which is able to block the current task until the scheduler tick before | ||
* the given time) | ||
* | ||
* \param until the \ref absolute_time_t value | ||
*/ | ||
#define sync_internal_yield_until_before(until) ((void)0) | ||
#endif | ||
|
||
#endif |
Original file line number | Diff line number | Diff line change |
---|---|---|
|
@@ -28,19 +28,33 @@ extern "C" { | |
* | ||
* See \ref critical_section.h for protecting access between multiple cores AND IRQ handlers | ||
*/ | ||
|
||
typedef struct __packed_aligned mutex { | ||
lock_core_t core; | ||
int8_t owner; //! core number or -1 for unowned | ||
lock_owner_id_t owner; //! owner id, or LOCK_INVALID_OWNER_ID for unowned | ||
uint8_t recursion_state; //! 0 means non recursive (owner or unowned) | ||
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. I wonder if it'd have been worth making this |
||
//! 1 is a maxed out recursive lock | ||
//! 2-254 is an owned lock | ||
//! 255 is an un-owned lock | ||
} mutex_t; | ||
|
||
#define MAX_RECURSION_STATE ((uint8_t)255) | ||
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. Hopefully people won't accidentally assume that |
||
|
||
/*! \brief Initialise a mutex structure | ||
* \ingroup mutex | ||
* | ||
* \param mtx Pointer to mutex structure | ||
*/ | ||
void mutex_init(mutex_t *mtx); | ||
|
||
/*! \brief Initialise a recursive mutex structure | ||
* \ingroup mutex | ||
* | ||
* A recursive mutex may be entered in a nested fashion by the same owner | ||
* | ||
* \param mtx Pointer to mutex structure | ||
*/ | ||
void recursive_mutex_init(mutex_t *mtx); | ||
|
||
/*! \brief Take ownership of a mutex | ||
* \ingroup mutex | ||
* | ||
|
@@ -129,6 +143,29 @@ static inline bool mutex_is_initialzed(mutex_t *mtx) { | |
*/ | ||
#define auto_init_mutex(name) static __attribute__((section(".mutex_array"))) mutex_t name | ||
|
||
/*! \brief Helper macro for static definition of recursive mutexes | ||
* \ingroup mutex | ||
* | ||
* A recursive mutex defined as follows: | ||
* | ||
* ```c | ||
* auto_init_recursive_mutex(my_mutex); | ||
* ``` | ||
* | ||
* Is equivalent to doing | ||
* | ||
* ```c | ||
* static mutex_t my_mutex; | ||
* | ||
* void my_init_function() { | ||
* recursive_mutex_init(&my_mutex); | ||
* } | ||
* ``` | ||
* | ||
* But the initialization of the mutex is performed automatically during runtime initialization | ||
*/ | ||
#define auto_init_recursive_mutex(name) static __attribute__((section(".mutex_array"))) mutex_t name = { .recursion_state = MAX_RECURSION_STATE } | ||
|
||
#ifdef __cplusplus | ||
} | ||
#endif | ||
|
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
"wraps a pin lock" -> "wraps a spin lock" ?