Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

[FL-3841] FuriEventLoop Pt.2 #3703

Merged
merged 60 commits into from
Aug 7, 2024
Merged
Show file tree
Hide file tree
Changes from 23 commits
Commits
Show all changes
60 commits
Select commit Hold shift + click to select a range
869dad8
Remove all primitive specialisation from FuriEventLoop
gsurkov Jun 11, 2024
82fafd9
Merge branch 'dev' into gsurkov/3841_event_loop_pt2
gsurkov Jun 11, 2024
d9363d3
Implement FuriEventLoop interoperation via inheritance for FuriMessag…
gsurkov Jun 12, 2024
97ed11e
Merge remote-tracking branch 'origin' into gsurkov/3841_event_loop_pt2
gsurkov Jun 12, 2024
ca838d9
Revert "Implement FuriEventLoop interoperation via inheritance for Fu…
gsurkov Jul 4, 2024
e82b5d8
Revert "Remove all primitive specialisation from FuriEventLoop"
gsurkov Jul 4, 2024
7885881
Merge remote-tracking branch 'origin/dev' into gsurkov/3841_event_loo…
gsurkov Jul 4, 2024
0a4d907
Merge remote-tracking branch 'origin/dev' into gsurkov/3841_event_loo…
gsurkov Jul 15, 2024
dbbeef1
Merge remote-tracking branch 'origin/dev' into gsurkov/3841_event_loo…
gsurkov Jul 17, 2024
470eeaa
Abstract primitive type from main logic in FuriEventLoop
gsurkov Jul 17, 2024
18c8c1e
Remove message_queue_i.h
gsurkov Jul 17, 2024
120ccb8
Add stream buffer support for event loop
gsurkov Jul 17, 2024
b1a900f
Add semaphore support for event loop
gsurkov Jul 17, 2024
42ae47c
Add temporary unit test workaround
gsurkov Jul 18, 2024
eb9d385
Make the linter happy
gsurkov Jul 18, 2024
ec3e1c1
Add mutex support for event loop
gsurkov Jul 19, 2024
e29009c
Merge branch 'dev' into gsurkov/3841_event_loop_pt2
gsurkov Jul 19, 2024
648b389
Implement event subscription and unsubscription while the event loop …
gsurkov Jul 23, 2024
eba1575
Fix PVS warnings
gsurkov Jul 23, 2024
ec59dbf
Implement edge events
gsurkov Jul 24, 2024
d1b6825
Fix leftover logical errors
gsurkov Jul 24, 2024
c612771
Add event loop timer example application
gsurkov Jul 24, 2024
4c87f36
Implement flag-based edge trigger and one-shot mode
gsurkov Jul 24, 2024
b5d4174
Add event loop mutex example application
gsurkov Jul 24, 2024
8a3a2f8
Fix a typo
gsurkov Jul 24, 2024
23b5c0d
Fix a copy/paste typo
gsurkov Jul 25, 2024
9cd5aed
Only notify the event loop if stream buffer is at or above its trigge…
gsurkov Jul 25, 2024
82a6f58
Reformat comments
gsurkov Jul 25, 2024
711f3b7
Add event loop stream buffer example application
gsurkov Jul 25, 2024
6c11a31
Add event loop multiple elements example application
gsurkov Jul 26, 2024
0b4f111
Improve event loop flag names
gsurkov Jul 26, 2024
cfe6edb
Remove redundant signal handler as it is already handled by the event…
gsurkov Jul 26, 2024
6872de1
Merge branch 'dev' into gsurkov/3841_event_loop_pt2
gsurkov Jul 29, 2024
dd3ffaf
Refactor Power service, improve ViewHolder
gsurkov Jul 29, 2024
8316854
Merge branch 'dev' into gsurkov/3841_event_loop_pt2
gsurkov Jul 29, 2024
ea730bb
Use ViewHolder instead of ViewDispatcher in About app
gsurkov Jul 30, 2024
a9b437a
Enable ViewDispatcher queue on construction, deprecate view_dispatche…
gsurkov Jul 30, 2024
3e7e7c4
Fix a typo
gsurkov Jul 30, 2024
60b4cd9
Remove all invocations of view_dispatcher_enable_queue()
gsurkov Jul 30, 2024
6a27727
Remove app-scened-template
gsurkov Jul 30, 2024
3f45f5b
Remove missing library from target.json
gsurkov Jul 30, 2024
541a8b8
Merge remote-tracking branch 'origin/dev' into gsurkov/3841_event_loo…
gsurkov Jul 30, 2024
2632fef
Port Accessor app to ViewHolder
gsurkov Jul 30, 2024
045c915
Make the linter happy
gsurkov Jul 30, 2024
b5c25c8
Add example_view_holder application, update ViewHolder docs
gsurkov Jul 30, 2024
0c07e36
Add example_view_dispatcher application, update ViewDispatcher docs
gsurkov Jul 31, 2024
c13c49e
Replace FuriSemaphore with FuriApiLock, remove workaround delay
gsurkov Jul 31, 2024
32fd0eb
Fix logical error
gsurkov Jul 31, 2024
1ed1933
Fix another logical error
gsurkov Jul 31, 2024
d05d6da
Merge branch 'dev' into gsurkov/3841_event_loop_pt2
gsurkov Jul 31, 2024
577d4b5
Use the sources directive to speed up compilation
gsurkov Jul 31, 2024
cbdebd3
Use constant define macro
gsurkov Jul 31, 2024
8ee6cb3
Merge branch 'dev' into gsurkov/3841_event_loop_pt2
hedger Aug 2, 2024
8f6b93c
Merge remote-tracking branch 'origin/dev' into gsurkov/3841_event_loo…
gsurkov Aug 4, 2024
5df5a37
Improve FuriEventLoop documentation
gsurkov Aug 5, 2024
3424ce2
Improve FuriEventLoop documentation once more
gsurkov Aug 5, 2024
9be39e5
Merge branch 'dev' into gsurkov/3841_event_loop_pt2
skotopes Aug 7, 2024
4df93f7
Bump API Version
skotopes Aug 7, 2024
bfe38d0
Gui: remove redundant checks from ViewDispatcher
skotopes Aug 7, 2024
4d5627d
Gui: remove dead ifs from ViewDispatcher
skotopes Aug 7, 2024
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
Expand Up @@ -82,7 +82,8 @@ static void view_port_input_callback(InputEvent* input_event, void* context) {
furi_message_queue_put(app->input_queue, input_event, 0);
}

static bool input_queue_callback(FuriMessageQueue* queue, void* context) {
static bool input_queue_callback(FuriEventLoopObject* object, void* context) {
FuriMessageQueue* queue = object;
EventLoopBlinkTestApp* app = context;

InputEvent event;
Expand Down Expand Up @@ -144,7 +145,7 @@ int32_t event_loop_blink_test_app(void* arg) {
gui_add_view_port(gui, view_port, GuiLayerFullscreen);

furi_event_loop_tick_set(app.event_loop, 500, event_loop_tick_callback, &app);
furi_event_loop_message_queue_subscribe(
furi_event_loop_subscribe_message_queue(
app.event_loop, app.input_queue, FuriEventLoopEventIn, input_queue_callback, &app);

furi_event_loop_run(app.event_loop);
Expand All @@ -154,7 +155,7 @@ int32_t event_loop_blink_test_app(void* arg) {

furi_record_close(RECORD_GUI);

furi_event_loop_message_queue_unsubscribe(app.event_loop, app.input_queue);
furi_event_loop_unsubscribe(app.event_loop, app.input_queue);
furi_message_queue_free(app.input_queue);

for(size_t i = 0; i < TIMER_COUNT; ++i) {
Expand Down
62 changes: 30 additions & 32 deletions applications/debug/unit_tests/tests/furi/furi_event_loop.c
Original file line number Diff line number Diff line change
Expand Up @@ -19,25 +19,24 @@ typedef struct {
uint32_t consumer_counter;
} TestFuriData;

bool test_furi_event_loop_producer_mq_callback(FuriMessageQueue* queue, void* context) {
bool test_furi_event_loop_producer_mq_callback(FuriEventLoopObject* object, void* context) {
furi_check(context);

TestFuriData* data = context;
furi_check(data->mq == queue, "Invalid queue");
furi_check(data->mq == object, "Invalid queue");

FURI_LOG_I(
TAG, "producer_mq_callback: %lu %lu", data->producer_counter, data->consumer_counter);

// Remove and add should not cause crash
// if(data->producer_counter == EVENT_LOOP_EVENT_COUNT/2) {
// furi_event_loop_message_queue_remove(data->producer_event_loop, data->mq);
// furi_event_loop_message_queue_add(
// data->producer_event_loop,
// data->mq,
// FuriEventLoopEventOut,
// test_furi_event_loop_producer_mq_callback,
// data);
// }
if(data->producer_counter == EVENT_LOOP_EVENT_COUNT / 2) {
furi_event_loop_unsubscribe(data->producer_event_loop, data->mq);
furi_event_loop_subscribe_message_queue(
data->producer_event_loop,
data->mq,
FuriEventLoopEventOut,
test_furi_event_loop_producer_mq_callback,
data);
}

if(data->producer_counter == EVENT_LOOP_EVENT_COUNT) {
furi_event_loop_stop(data->producer_event_loop);
Expand All @@ -61,7 +60,7 @@ int32_t test_furi_event_loop_producer(void* p) {
FURI_LOG_I(TAG, "producer start 1st run");

data->producer_event_loop = furi_event_loop_alloc();
furi_event_loop_message_queue_subscribe(
furi_event_loop_subscribe_message_queue(
data->producer_event_loop,
data->mq,
FuriEventLoopEventOut,
Expand All @@ -73,15 +72,15 @@ int32_t test_furi_event_loop_producer(void* p) {
// 2 EventLoop index, 0xFFFFFFFF - all possible flags, emulate uncleared flags
xTaskNotifyIndexed(xTaskGetCurrentTaskHandle(), 2, 0xFFFFFFFF, eSetBits);

furi_event_loop_message_queue_unsubscribe(data->producer_event_loop, data->mq);
furi_event_loop_unsubscribe(data->producer_event_loop, data->mq);
furi_event_loop_free(data->producer_event_loop);

FURI_LOG_I(TAG, "producer start 2nd run");

data->producer_counter = 0;
data->producer_event_loop = furi_event_loop_alloc();

furi_event_loop_message_queue_subscribe(
furi_event_loop_subscribe_message_queue(
data->producer_event_loop,
data->mq,
FuriEventLoopEventOut,
Expand All @@ -90,36 +89,35 @@ int32_t test_furi_event_loop_producer(void* p) {

furi_event_loop_run(data->producer_event_loop);

furi_event_loop_message_queue_unsubscribe(data->producer_event_loop, data->mq);
furi_event_loop_unsubscribe(data->producer_event_loop, data->mq);
furi_event_loop_free(data->producer_event_loop);

FURI_LOG_I(TAG, "producer end");

return 0;
}

bool test_furi_event_loop_consumer_mq_callback(FuriMessageQueue* queue, void* context) {
bool test_furi_event_loop_consumer_mq_callback(FuriEventLoopObject* object, void* context) {
furi_check(context);

TestFuriData* data = context;
furi_check(data->mq == queue);
furi_check(data->mq == object);

furi_delay_us(furi_hal_random_get() % 1000);
furi_check(furi_message_queue_get(data->mq, &data->consumer_counter, 0) == FuriStatusOk);

FURI_LOG_I(
TAG, "consumer_mq_callback: %lu %lu", data->producer_counter, data->consumer_counter);

// Remove and add should not cause crash
// if(data->producer_counter == EVENT_LOOP_EVENT_COUNT/2) {
// furi_event_loop_message_queue_remove(data->consumer_event_loop, data->mq);
// furi_event_loop_message_queue_add(
// data->consumer_event_loop,
// data->mq,
// FuriEventLoopEventIn,
// test_furi_event_loop_producer_mq_callback,
// data);
// }
if(data->consumer_counter == EVENT_LOOP_EVENT_COUNT / 2) {
furi_event_loop_unsubscribe(data->consumer_event_loop, data->mq);
furi_event_loop_subscribe_message_queue(
data->consumer_event_loop,
data->mq,
FuriEventLoopEventIn,
test_furi_event_loop_consumer_mq_callback,
data);
}

if(data->consumer_counter == EVENT_LOOP_EVENT_COUNT) {
furi_event_loop_stop(data->consumer_event_loop);
Expand All @@ -137,7 +135,7 @@ int32_t test_furi_event_loop_consumer(void* p) {
FURI_LOG_I(TAG, "consumer start 1st run");

data->consumer_event_loop = furi_event_loop_alloc();
furi_event_loop_message_queue_subscribe(
furi_event_loop_subscribe_message_queue(
data->consumer_event_loop,
data->mq,
FuriEventLoopEventIn,
Expand All @@ -149,14 +147,14 @@ int32_t test_furi_event_loop_consumer(void* p) {
// 2 EventLoop index, 0xFFFFFFFF - all possible flags, emulate uncleared flags
xTaskNotifyIndexed(xTaskGetCurrentTaskHandle(), 2, 0xFFFFFFFF, eSetBits);

furi_event_loop_message_queue_unsubscribe(data->consumer_event_loop, data->mq);
furi_event_loop_unsubscribe(data->consumer_event_loop, data->mq);
furi_event_loop_free(data->consumer_event_loop);

FURI_LOG_I(TAG, "consumer start 2nd run");

data->consumer_counter = 0;
data->consumer_event_loop = furi_event_loop_alloc();
furi_event_loop_message_queue_subscribe(
furi_event_loop_subscribe_message_queue(
data->consumer_event_loop,
data->mq,
FuriEventLoopEventIn,
Expand All @@ -165,7 +163,7 @@ int32_t test_furi_event_loop_consumer(void* p) {

furi_event_loop_run(data->consumer_event_loop);

furi_event_loop_message_queue_unsubscribe(data->consumer_event_loop, data->mq);
furi_event_loop_unsubscribe(data->consumer_event_loop, data->mq);
furi_event_loop_free(data->consumer_event_loop);

FURI_LOG_I(TAG, "consumer end");
Expand Down
4 changes: 4 additions & 0 deletions applications/debug/unit_tests/tests/rpc/rpc_test.c
Original file line number Diff line number Diff line change
Expand Up @@ -127,6 +127,8 @@ static void test_rpc_teardown(void) {
furi_check(
furi_semaphore_acquire(rpc_session[0].terminate_semaphore, FuriWaitForever) ==
FuriStatusOk);
// Temporary workaround: do not delete the semaphore immediately after taking it
furi_delay_ms(1);
furi_record_close(RECORD_RPC);
furi_stream_buffer_free(rpc_session[0].output_stream);
furi_semaphore_free(rpc_session[0].close_session_semaphore);
Expand All @@ -145,6 +147,8 @@ static void test_rpc_teardown_second_session(void) {
furi_check(
furi_semaphore_acquire(rpc_session[1].terminate_semaphore, FuriWaitForever) ==
FuriStatusOk);
// Temporary workaround: do not delete the semaphore immediately after taking it
furi_delay_ms(1);
furi_stream_buffer_free(rpc_session[1].output_stream);
furi_semaphore_free(rpc_session[1].close_session_semaphore);
furi_semaphore_free(rpc_session[1].terminate_semaphore);
Expand Down
10 changes: 3 additions & 7 deletions applications/debug/unit_tests/unit_test_api_table_i.h
Original file line number Diff line number Diff line change
Expand Up @@ -36,14 +36,10 @@ static constexpr auto unit_tests_api_table = sort(create_array_t<sym_entry>(
API_METHOD(furi_event_loop_alloc, FuriEventLoop*, (void)),
API_METHOD(furi_event_loop_free, void, (FuriEventLoop*)),
API_METHOD(
furi_event_loop_message_queue_subscribe,
furi_event_loop_subscribe_message_queue,
void,
(FuriEventLoop*,
FuriMessageQueue*,
FuriEventLoopEvent,
FuriEventLoopMessageQueueCallback,
void*)),
API_METHOD(furi_event_loop_message_queue_unsubscribe, void, (FuriEventLoop*, FuriMessageQueue*)),
(FuriEventLoop*, FuriMessageQueue*, FuriEventLoopEvent, FuriEventLoopEventCallback, void*)),
API_METHOD(furi_event_loop_unsubscribe, void, (FuriEventLoop*, FuriEventLoopObject*)),
API_METHOD(furi_event_loop_run, void, (FuriEventLoop*)),
API_METHOD(furi_event_loop_stop, void, (FuriEventLoop*)),
API_VARIABLE(PB_Main_msg, PB_Main_msg_t)));
7 changes: 7 additions & 0 deletions applications/examples/example_event_loop/application.fam
Original file line number Diff line number Diff line change
@@ -0,0 +1,7 @@
# Application manifest for the FuriEventLoop timer example.
# entry_point must match the function exported by example_event_loop_timer.c.
App(
    appid="example_event_loop_timer",
    name="Example: Event Loop Timer",
    apptype=FlipperAppType.EXTERNAL,
    entry_point="example_event_loop_timer_app",
    fap_category="Examples",
)
gsurkov marked this conversation as resolved.
Show resolved Hide resolved
Original file line number Diff line number Diff line change
@@ -0,0 +1,87 @@
/**
* @file example_event_loop_timer.c
* @brief Example application that demonstrates FuriEventLoop's software timer capability.
*
* This application prints a countdown from 10 to 0 to the debug output and then exits.
* Despite only one timer being used in this example for clarity, an event loop instance can have
* an arbitrary number of independent timers of any type (periodic or one-shot).
*
*/
#include <furi.h>

#define TAG "ExampleEventLoopTimer"

#define COUNTDOWN_START_VALUE (10)
#define COUNTDOWN_INTERVAL_MS (1000)

// Application state shared between setup/teardown and the timer callback.
typedef struct {
    FuriEventLoop* event_loop; // Event loop instance that owns and drives the timer
    FuriEventLoopTimer* timer; // Periodic countdown timer bound to event_loop
    uint32_t countdown_value; // Remaining countdown seconds, printed on each tick
} EventLoopTimerApp;

// Timer expiry handler; the event loop invokes it once per COUNTDOWN_INTERVAL_MS.
static void event_loop_timer_callback(void* context) {
    furi_assert(context);
    EventLoopTimerApp* app = context;

    // Report the current countdown value to the debug log
    FURI_LOG_I(TAG, "T-00:00:%02lu", app->countdown_value);

    if(app->countdown_value != 0) {
        // Not done yet: tick down by one second and wait for the next expiry
        app->countdown_value -= 1;
        return;
    }

    // Countdown complete: announce lift-off and terminate the event loop.
    // Control then returns to the furi_event_loop_run() call in the run function.
    FURI_LOG_I(TAG, "Blast off to adventure!");
    furi_event_loop_stop(app->event_loop);
}

// Allocates the application state and creates its event loop and timer.
static EventLoopTimerApp* event_loop_timer_app_alloc(void) {
    EventLoopTimerApp* app = malloc(sizeof *app);

    // The event loop must exist first: timers are bound to a loop instance
    // and their callbacks always execute in that loop's context.
    app->event_loop = furi_event_loop_alloc();

    // Periodic timer: it re-arms itself automatically each time it expires.
    app->timer = furi_event_loop_timer_alloc(
        app->event_loop, event_loop_timer_callback, FuriEventLoopTimerTypePeriodic, app);

    // Seed the countdown with its configured starting value.
    app->countdown_value = COUNTDOWN_START_VALUE;

    return app;
}

// Releases the timer, the event loop, and finally the application state.
static void event_loop_timer_app_free(EventLoopTimerApp* app) {
    // NOTE: every timer MUST be freed before the event loop that owns it;
    // freeing the loop while timers still exist results in a crash.
    furi_event_loop_timer_free(app->timer);
    // All timers are gone, so tearing down the loop itself is now safe.
    furi_event_loop_free(app->event_loop);
    free(app);
}

// Arms the countdown timer and runs the event loop until it is stopped.
static void event_loop_timer_app_run(EventLoopTimerApp* app) {
    FURI_LOG_I(TAG, "All systems go! Prepare for countdown!");

    // A timer may be started either before the loop runs or from within any
    // callback executed by an already-running loop.
    furi_event_loop_timer_start(app->timer, COUNTDOWN_INTERVAL_MS);

    // Blocks here until some callback invokes furi_event_loop_stop().
    furi_event_loop_run(app->event_loop);
}

// Application entry point, referenced by entry_point in application.fam.
// Returns 0 on normal completion.
int32_t example_event_loop_timer_app(void* arg) {
    UNUSED(arg);

    // Standard alloc / run / free lifecycle.
    EventLoopTimerApp* app = event_loop_timer_app_alloc();

    event_loop_timer_app_run(app);

    event_loop_timer_app_free(app);
    return 0;
}
6 changes: 3 additions & 3 deletions applications/services/dolphin/dolphin.c
Original file line number Diff line number Diff line change
Expand Up @@ -191,8 +191,8 @@ static void dolphin_update_clear_limits_timer_period(void* context) {
FURI_LOG_D(TAG, "Daily limits reset in %lu ms", time_to_clear_limits);
}

static bool dolphin_process_event(FuriMessageQueue* queue, void* context) {
UNUSED(queue);
static bool dolphin_process_event(FuriEventLoopObject* object, void* context) {
UNUSED(object);

Dolphin* dolphin = context;
DolphinEvent event;
Expand Down Expand Up @@ -249,7 +249,7 @@ int32_t dolphin_srv(void* p) {

dolphin_state_load(dolphin->state);

furi_event_loop_message_queue_subscribe(
furi_event_loop_subscribe_message_queue(
dolphin->event_loop,
dolphin->event_queue,
FuriEventLoopEventIn,
Expand Down
18 changes: 8 additions & 10 deletions applications/services/gui/view_dispatcher.c
Original file line number Diff line number Diff line change
Expand Up @@ -30,13 +30,11 @@ void view_dispatcher_free(ViewDispatcher* view_dispatcher) {
view_port_free(view_dispatcher->view_port);
// Free internal queue
if(view_dispatcher->input_queue) {
furi_event_loop_message_queue_unsubscribe(
view_dispatcher->event_loop, view_dispatcher->input_queue);
furi_event_loop_unsubscribe(view_dispatcher->event_loop, view_dispatcher->input_queue);
furi_message_queue_free(view_dispatcher->input_queue);
}
if(view_dispatcher->event_queue) {
furi_event_loop_message_queue_unsubscribe(
view_dispatcher->event_loop, view_dispatcher->event_queue);
furi_event_loop_unsubscribe(view_dispatcher->event_loop, view_dispatcher->event_queue);
furi_message_queue_free(view_dispatcher->event_queue);
}
if(view_dispatcher->event_loop) {
Expand All @@ -53,15 +51,15 @@ void view_dispatcher_enable_queue(ViewDispatcher* view_dispatcher) {
view_dispatcher->event_loop = furi_event_loop_alloc();

view_dispatcher->input_queue = furi_message_queue_alloc(16, sizeof(InputEvent));
furi_event_loop_message_queue_subscribe(
furi_event_loop_subscribe_message_queue(
view_dispatcher->event_loop,
view_dispatcher->input_queue,
FuriEventLoopEventIn,
view_dispatcher_run_input_callback,
view_dispatcher);

view_dispatcher->event_queue = furi_message_queue_alloc(16, sizeof(uint32_t));
furi_event_loop_message_queue_subscribe(
furi_event_loop_subscribe_message_queue(
view_dispatcher->event_loop,
view_dispatcher->event_queue,
FuriEventLoopEventIn,
Expand Down Expand Up @@ -381,10 +379,10 @@ void view_dispatcher_update(View* view, void* context) {
}
}

bool view_dispatcher_run_event_callback(FuriMessageQueue* queue, void* context) {
bool view_dispatcher_run_event_callback(FuriEventLoopObject* object, void* context) {
furi_assert(context);
ViewDispatcher* instance = context;
furi_assert(instance->event_queue == queue);
furi_assert(instance->event_queue == object);

uint32_t event;
furi_check(furi_message_queue_get(instance->event_queue, &event, 0) == FuriStatusOk);
Expand All @@ -393,10 +391,10 @@ bool view_dispatcher_run_event_callback(FuriMessageQueue* queue, void* context)
return true;
}

bool view_dispatcher_run_input_callback(FuriMessageQueue* queue, void* context) {
bool view_dispatcher_run_input_callback(FuriEventLoopObject* object, void* context) {
furi_assert(context);
ViewDispatcher* instance = context;
furi_assert(instance->input_queue == queue);
furi_assert(instance->input_queue == object);

InputEvent input;
furi_check(furi_message_queue_get(instance->input_queue, &input, 0) == FuriStatusOk);
Expand Down
4 changes: 2 additions & 2 deletions applications/services/gui/view_dispatcher_i.h
Original file line number Diff line number Diff line change
Expand Up @@ -56,7 +56,7 @@ void view_dispatcher_set_current_view(ViewDispatcher* view_dispatcher, View* vie
void view_dispatcher_update(View* view, void* context);

/** ViewDispatcher run event loop event callback */
bool view_dispatcher_run_event_callback(FuriMessageQueue* queue, void* context);
bool view_dispatcher_run_event_callback(FuriEventLoopObject* object, void* context);

/** ViewDispatcher run event loop input callback */
bool view_dispatcher_run_input_callback(FuriMessageQueue* queue, void* context);
bool view_dispatcher_run_input_callback(FuriEventLoopObject* object, void* context);
Loading
Loading