From 56ab5841900e57b45f3aa11de31ac99c75aa006c Mon Sep 17 00:00:00 2001
From: Radoslaw Koppel
Date: Wed, 27 Nov 2019 14:20:37 +0100
Subject: [PATCH] kernel: thread: k_thread_foreach_unlocked: Implement

Implement thread foreach processing with limited locking,
allowing thread processing that may take more time at the cost
of possibly missing some threads when the thread list is
modified while the iteration is in progress.

Signed-off-by: Radoslaw Koppel
---
 include/kernel.h | 38 ++++++++++++++++++++++++++++++++++----
 kernel/thread.c  | 18 ++++++++++++++++++
 2 files changed, 52 insertions(+), 4 deletions(-)

diff --git a/include/kernel.h b/include/kernel.h
index 23c0fd17f02c5b..39d8c9dc17d087 100644
--- a/include/kernel.h
+++ b/include/kernel.h
@@ -619,15 +619,45 @@ typedef void (*k_thread_user_cb_t)(const struct k_thread *thread,
  * @param user_data Pointer to user data.
  *
  * @note CONFIG_THREAD_MONITOR must be set for this function
- * to be effective. Also this API uses irq_lock to protect the
- * _kernel.threads list which means creation of new threads and
- * terminations of existing threads are blocked until this
- * API returns.
+ * to be effective.
+ * @note This API uses @ref k_spin_lock to protect the _kernel.threads
+ * list which means creation of new threads and terminations of existing
+ * threads are blocked until this API returns.
  *
  * @return N/A
  */
 extern void k_thread_foreach(k_thread_user_cb_t user_cb, void *user_data);
 
+/**
+ * @brief Iterate over all the threads in the system without locking.
+ *
+ * This routine works exactly the same as @ref k_thread_foreach
+ * but unlocks interrupts while user_cb is executed.
+ *
+ * @param user_cb Pointer to the user callback function.
+ * @param user_data Pointer to user data.
+ *
+ * @note CONFIG_THREAD_MONITOR must be set for this function
+ * to be effective.
+ * @note This API uses @ref k_spin_lock only when accessing the _kernel.threads
+ * queue elements. It unlocks it during user callback function processing.
+ * If a new thread is created while this @c foreach function is in progress,
+ * the new thread will not be included in the enumeration.
+ * If a thread is aborted during this enumeration, there is a race and
+ * the aborted thread may or may not be included in the
+ * enumeration.
+ * @note If a thread is aborted and the memory occupied by its @c k_thread
+ * structure is reused while @c k_thread_foreach_unlocked is in progress,
+ * the system may become unstable.
+ * This function may never return, as it would follow a @c next_thread
+ * pointer into memory that no longer holds a valid @c k_thread
+ * structure.
+ * Do not reuse the memory occupied by the @c k_thread structure of a
+ * thread aborted after this function was called, in any context.
+ */
+extern void k_thread_foreach_unlocked(
+	k_thread_user_cb_t user_cb, void *user_data);
+
 /** @} */
 
 /**
diff --git a/kernel/thread.c b/kernel/thread.c
index f57d148efb24c2..300e3e8e570abd 100644
--- a/kernel/thread.c
+++ b/kernel/thread.c
@@ -55,6 +55,24 @@ void k_thread_foreach(k_thread_user_cb_t user_cb, void *user_data)
 #endif
 }
 
+void k_thread_foreach_unlocked(k_thread_user_cb_t user_cb, void *user_data)
+{
+#if defined(CONFIG_THREAD_MONITOR)
+	struct k_thread *thread;
+	k_spinlock_key_t key;
+
+	__ASSERT(user_cb != NULL, "user_cb can not be NULL");
+
+	key = k_spin_lock(&lock);
+	for (thread = _kernel.threads; thread; thread = thread->next_thread) {
+		k_spin_unlock(&lock, key);
+		user_cb(thread, user_data);
+		key = k_spin_lock(&lock);
+	}
+	k_spin_unlock(&lock, key);
+#endif
+}
+
 bool k_is_in_isr(void)
 {
 	return arch_is_in_isr();
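
For reference, a minimal usage sketch of the new API (not part of the patch): it dumps every thread's address and name from a context that is allowed to log. The callback and wrapper names here are hypothetical, and k_thread_name_get() only returns a name when CONFIG_THREAD_NAME=y; it is an existing Zephyr API of this era, as are printk() and the headers below.

```c
#include <kernel.h>
#include <sys/printk.h>

/* Hypothetical callback: invoked once per thread with the list spinlock
 * released, so logging here does not stall thread creation/termination
 * for the whole enumeration.
 */
static void dump_thread(const struct k_thread *thread, void *user_data)
{
	unsigned int *count = user_data;
	/* Returns NULL unless CONFIG_THREAD_NAME is enabled. */
	const char *name = k_thread_name_get((k_tid_t)thread);

	printk("thread %p: %s\n", thread, (name != NULL) ? name : "<unnamed>");
	(*count)++;
}

/* Hypothetical wrapper showing the call site. */
void dump_all_threads(void)
{
	unsigned int count = 0;

	k_thread_foreach_unlocked(dump_thread, &count);
	printk("%u threads enumerated\n", count);
}
```

Per the notes in the new doxygen comment, such a callback must tolerate threads appearing or disappearing mid-enumeration, and the k_thread memory of any thread aborted during the walk must not be reused until the enumeration completes.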