
Adding RP2350 SDK and target framework (#13988)

* Adding RP2350 SDK and target framework

* Spacing

* Removing board definitions
Authored by J Blackman on 2024-10-23 10:02:48 +11:00, committed by GitHub
parent 462cb05930
commit 2dd6f95aad
576 changed files with 435012 additions and 0 deletions


@@ -0,0 +1,123 @@
/*
* Copyright (c) 2022 Raspberry Pi (Trading) Ltd.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#include "pico/async_context_base.h"
bool async_context_base_add_at_time_worker(async_context_t *self, async_at_time_worker_t *worker) {
async_at_time_worker_t **prev = &self->at_time_list;
while (*prev) {
if (worker == *prev) {
return false;
}
prev = &(*prev)->next;
}
*prev = worker;
worker->next = NULL;
return true;
}
bool async_context_base_remove_at_time_worker(async_context_t *self, async_at_time_worker_t *worker) {
async_at_time_worker_t **prev = &self->at_time_list;
while (*prev) {
if (worker == *prev) {
*prev = worker->next;
return true;
}
prev = &(*prev)->next;
}
return false;
}
bool async_context_base_add_when_pending_worker(async_context_t *self, async_when_pending_worker_t *worker) {
async_when_pending_worker_t **prev = &self->when_pending_list;
while (*prev) {
if (worker == *prev) {
return false;
}
prev = &(*prev)->next;
}
*prev = worker;
worker->next = NULL;
return true;
}
bool async_context_base_remove_when_pending_worker(async_context_t *self, async_when_pending_worker_t *worker) {
async_when_pending_worker_t **prev = &self->when_pending_list;
while (*prev) {
if (worker == *prev) {
*prev = worker->next;
return true;
}
prev = &(*prev)->next;
}
return false;
}
async_at_time_worker_t *async_context_base_remove_ready_at_time_worker(async_context_t *self) {
async_at_time_worker_t **best_prev = NULL;
if (self->at_time_list) {
absolute_time_t earliest = get_absolute_time();
for (async_at_time_worker_t **prev = &self->at_time_list; *prev; prev = &(*prev)->next) {
if (absolute_time_diff_us((*prev)->next_time, earliest) >= 0) {
earliest = (*prev)->next_time;
assert(!is_at_the_end_of_time(earliest)); // should never be less than now
best_prev = prev;
}
}
}
async_at_time_worker_t *rc;
if (best_prev) {
rc = *best_prev;
*best_prev = rc->next;
} else {
rc = NULL;
}
return rc;
}
void async_context_base_refresh_next_timeout(async_context_t *self) {
absolute_time_t earliest = at_the_end_of_time;
for (async_at_time_worker_t *worker = self->at_time_list; worker; ) {
async_at_time_worker_t *next = worker->next;
if (absolute_time_diff_us(worker->next_time, earliest) > 0) {
earliest = worker->next_time;
}
worker = next;
}
self->next_time = earliest;
}
absolute_time_t async_context_base_execute_once(async_context_t *self) {
async_at_time_worker_t *at_time_worker;
while (NULL != (at_time_worker = async_context_base_remove_ready_at_time_worker(self))) {
at_time_worker->do_work(self, at_time_worker);
}
for(async_when_pending_worker_t *when_pending_worker = self->when_pending_list; when_pending_worker; when_pending_worker = when_pending_worker->next) {
if (when_pending_worker->work_pending) {
when_pending_worker->work_pending = false;
when_pending_worker->do_work(self, when_pending_worker);
}
}
async_context_base_refresh_next_timeout(self);
return self->next_time;
}
bool async_context_base_needs_servicing(async_context_t *self) {
absolute_time_t now = get_absolute_time();
if (self->at_time_list) {
for (async_at_time_worker_t *worker = self->at_time_list; worker; worker = worker->next) {
if (absolute_time_diff_us(worker->next_time, now) >= 0) {
return true;
}
}
}
for(async_when_pending_worker_t *when_pending_worker = self->when_pending_list; when_pending_worker; when_pending_worker = when_pending_worker->next) {
if (when_pending_worker->work_pending) {
return true;
}
}
return false;
}


@@ -0,0 +1,295 @@
/*
* Copyright (c) 2022 Raspberry Pi (Trading) Ltd.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#include <string.h>
#include "pico/async_context_freertos.h"
#include "pico/async_context_base.h"
#include "pico/sync.h"
#include "hardware/irq.h"
#include "semphr.h"
#if configNUMBER_OF_CORES > 1 && !defined(configUSE_CORE_AFFINITY)
#error async_context_freertos requires configUSE_CORE_AFFINITY under SMP
#endif
static const async_context_type_t template;
static void async_context_freertos_acquire_lock_blocking(async_context_t *self_base);
static void async_context_freertos_release_lock(async_context_t *self_base);
static void async_context_freertos_lock_check(async_context_t *self_base);
static TickType_t sensible_ticks_until(absolute_time_t until) {
TickType_t ticks;
int64_t delay_us = absolute_time_diff_us(get_absolute_time(), until);
if (delay_us <= 0) {
ticks = 0;
} else {
static const uint32_t max_delay = 60000000;
uint32_t delay_us_32 = delay_us > max_delay ? max_delay : (uint32_t) delay_us;
ticks = pdMS_TO_TICKS((delay_us_32+999)/1000);
// round up: rounding down to zero would be wrong (it may produce no delay
// where a delay is needed), and we also don't want to wake up early only to
// find there is no work to do yet!
ticks++;
}
return ticks;
}
static void process_under_lock(async_context_freertos_t *self) {
#ifndef NDEBUG
async_context_freertos_lock_check(&self->core);
#endif
bool repeat;
do {
repeat = false;
absolute_time_t next_time = async_context_base_execute_once(&self->core);
TickType_t ticks;
if (is_at_the_end_of_time(next_time)) {
ticks = portMAX_DELAY;
} else {
ticks = sensible_ticks_until(next_time);
}
if (ticks) {
// last parameter (timeout) is also 'ticks', since there is no point waiting to change the period
// for longer than the period itself!
repeat = pdFALSE == xTimerChangePeriod(self->timer_handle, ticks, ticks);
} else {
repeat = true;
}
} while (repeat);
}
static void async_context_task(__unused void *vself) {
async_context_freertos_t *self = (async_context_freertos_t *)vself;
do {
ulTaskNotifyTake(pdFALSE, portMAX_DELAY);
if (self->task_should_exit) break;
async_context_freertos_acquire_lock_blocking(&self->core);
process_under_lock(self);
async_context_freertos_release_lock(&self->core);
__sev(); // it is possible regular code is waiting on a WFE on the other core
} while (!self->task_should_exit);
vTaskDelete(NULL);
}
static void async_context_freertos_wake_up(async_context_t *self_base) {
async_context_freertos_t *self = (async_context_freertos_t *)self_base;
if (self->task_handle) {
if (portCHECK_IF_IN_ISR()) {
vTaskNotifyGiveFromISR(self->task_handle, NULL);
xSemaphoreGiveFromISR(self->work_needed_sem, NULL);
} else {
// we don't want to wake ourselves up (we will only ever be called
// from the async_context_task if we own the lock, in which case processing
// will already happen when the lock is finally unlocked)
if (xTaskGetCurrentTaskHandle() != self->task_handle) {
xTaskNotifyGive(self->task_handle);
xSemaphoreGive(self->work_needed_sem);
} else {
#ifndef NDEBUG
async_context_freertos_lock_check(self_base);
#endif
}
}
}
}
static void timer_handler(__unused TimerHandle_t handle)
{
async_context_freertos_t *self = (async_context_freertos_t *)pvTimerGetTimerID(handle);
async_context_freertos_wake_up(&self->core);
}
bool async_context_freertos_init(async_context_freertos_t *self, async_context_freertos_config_t *config) {
memset(self, 0, sizeof(*self));
self->core.type = &template;
self->core.flags = ASYNC_CONTEXT_FLAG_CALLBACK_FROM_NON_IRQ;
self->core.core_num = get_core_num();
self->lock_mutex = xSemaphoreCreateRecursiveMutex();
self->work_needed_sem = xSemaphoreCreateBinary();
self->timer_handle = xTimerCreate( "async_context_timer", // Just a text name, not used by the kernel.
portMAX_DELAY,
pdFALSE, // one-shot timer; the period is set via xTimerChangePeriod before each use
self,
timer_handler);
if (!self->lock_mutex ||
!self->work_needed_sem ||
!self->timer_handle ||
pdPASS != xTaskCreate(async_context_task, "async_context_task", config->task_stack_size, self,
config->task_priority, &self->task_handle)) {
async_context_deinit(&self->core);
return false;
}
#if configNUMBER_OF_CORES > 1
UBaseType_t core_id = config->task_core_id;
if (core_id == (UBaseType_t)-1) {
core_id = portGET_CORE_ID();
}
// we must run on a single core
vTaskCoreAffinitySet(self->task_handle, 1u << core_id);
#endif
return true;
}
static uint32_t end_task_func(void *param) {
async_context_freertos_t *self = (async_context_freertos_t *)param;
// we will immediately exit
self->task_should_exit = true;
return 0;
}
void async_context_freertos_deinit(async_context_t *self_base) {
async_context_freertos_t *self = (async_context_freertos_t *)self_base;
if (self->task_handle) {
async_context_execute_sync(self_base, end_task_func, self_base);
}
if (self->timer_handle) {
xTimerDelete(self->timer_handle, 0);
}
if (self->lock_mutex) {
vSemaphoreDelete(self->lock_mutex);
}
if (self->work_needed_sem) {
vSemaphoreDelete(self->work_needed_sem);
}
memset(self, 0, sizeof(*self));
}
void async_context_freertos_acquire_lock_blocking(async_context_t *self_base) {
async_context_freertos_t *self = (async_context_freertos_t *)self_base;
// Take the recursive mutex; this excludes the async_context task and callers on other cores
assert(!portCHECK_IF_IN_ISR());
xSemaphoreTakeRecursive(self->lock_mutex, portMAX_DELAY);
self->nesting++;
}
void async_context_freertos_lock_check(__unused async_context_t *self_base) {
#ifndef NDEBUG
async_context_freertos_t *self = (async_context_freertos_t *)self_base;
// Verify that the calling task currently holds the recursive mutex
assert(xSemaphoreGetMutexHolder(self->lock_mutex) == xTaskGetCurrentTaskHandle());
#endif
}
typedef struct sync_func_call{
async_when_pending_worker_t worker;
SemaphoreHandle_t sem;
uint32_t (*func)(void *param);
void *param;
uint32_t rc;
} sync_func_call_t;
static void handle_sync_func_call(async_context_t *context, async_when_pending_worker_t *worker) {
sync_func_call_t *call = (sync_func_call_t *)worker;
call->rc = call->func(call->param);
xSemaphoreGive(call->sem);
async_context_remove_when_pending_worker(context, worker);
}
uint32_t async_context_freertos_execute_sync(async_context_t *self_base, uint32_t (*func)(void *param), void *param) {
async_context_freertos_t *self = (async_context_freertos_t*)self_base;
hard_assert(xSemaphoreGetMutexHolder(self->lock_mutex) != xTaskGetCurrentTaskHandle());
sync_func_call_t call;
call.worker.do_work = handle_sync_func_call;
call.func = func;
call.param = param;
call.sem = xSemaphoreCreateBinary();
async_context_add_when_pending_worker(self_base, &call.worker);
async_context_set_work_pending(self_base, &call.worker);
xSemaphoreTake(call.sem, portMAX_DELAY);
vSemaphoreDelete(call.sem);
return call.rc;
}
void async_context_freertos_release_lock(async_context_t *self_base) {
async_context_freertos_t *self = (async_context_freertos_t *)self_base;
bool do_wakeup = false;
if (self->nesting == 1) {
// note that we always do a processing on outermost lock exit, to facilitate cases
// like lwIP where we have no notification when lwIP timers are added.
//
// this operation must be done from the right task
if (self->task_handle != xTaskGetCurrentTaskHandle()) {
// note we defer the wakeup until after we release the lock, otherwise it can be wasteful
// (waking up the task, but then having it block immediately on us)
do_wakeup = true;
} else {
process_under_lock(self);
}
}
--self->nesting;
xSemaphoreGiveRecursive(self->lock_mutex);
if (do_wakeup) {
async_context_freertos_wake_up(self_base);
}
}
static bool async_context_freertos_add_at_time_worker(async_context_t *self_base, async_at_time_worker_t *worker) {
async_context_freertos_acquire_lock_blocking(self_base);
bool rc = async_context_base_add_at_time_worker(self_base, worker);
async_context_freertos_release_lock(self_base);
return rc;
}
static bool async_context_freertos_remove_at_time_worker(async_context_t *self_base, async_at_time_worker_t *worker) {
async_context_freertos_acquire_lock_blocking(self_base);
bool rc = async_context_base_remove_at_time_worker(self_base, worker);
async_context_freertos_release_lock(self_base);
return rc;
}
static bool async_context_freertos_add_when_pending_worker(async_context_t *self_base, async_when_pending_worker_t *worker) {
async_context_freertos_acquire_lock_blocking(self_base);
bool rc = async_context_base_add_when_pending_worker(self_base, worker);
async_context_freertos_release_lock(self_base);
return rc;
}
static bool async_context_freertos_remove_when_pending_worker(async_context_t *self_base, async_when_pending_worker_t *worker) {
async_context_freertos_acquire_lock_blocking(self_base);
bool rc = async_context_base_remove_when_pending_worker(self_base, worker);
async_context_freertos_release_lock(self_base);
return rc;
}
static void async_context_freertos_set_work_pending(async_context_t *self_base, async_when_pending_worker_t *worker) {
worker->work_pending = true;
async_context_freertos_wake_up(self_base);
}
static void async_context_freertos_wait_until(__unused async_context_t *self_base, absolute_time_t until) {
assert(!portCHECK_IF_IN_ISR());
TickType_t ticks = sensible_ticks_until(until);
vTaskDelay(ticks);
}
static void async_context_freertos_wait_for_work_until(async_context_t *self_base, absolute_time_t until) {
async_context_freertos_t *self = (async_context_freertos_t *)self_base;
assert(!portCHECK_IF_IN_ISR());
while (!time_reached(until)) {
TickType_t ticks = sensible_ticks_until(until);
if (!ticks || xSemaphoreTake(self->work_needed_sem, ticks)) return;
}
}
static const async_context_type_t template = {
.type = ASYNC_CONTEXT_FREERTOS,
.acquire_lock_blocking = async_context_freertos_acquire_lock_blocking,
.release_lock = async_context_freertos_release_lock,
.lock_check = async_context_freertos_lock_check,
.execute_sync = async_context_freertos_execute_sync,
.add_at_time_worker = async_context_freertos_add_at_time_worker,
.remove_at_time_worker = async_context_freertos_remove_at_time_worker,
.add_when_pending_worker = async_context_freertos_add_when_pending_worker,
.remove_when_pending_worker = async_context_freertos_remove_when_pending_worker,
.set_work_pending = async_context_freertos_set_work_pending,
.poll = 0,
.wait_until = async_context_freertos_wait_until,
.wait_for_work_until = async_context_freertos_wait_for_work_until,
.deinit = async_context_freertos_deinit,
};


@@ -0,0 +1,73 @@
/*
* Copyright (c) 2022 Raspberry Pi (Trading) Ltd.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#include <string.h>
#include "pico/async_context_poll.h"
#include "pico/async_context_base.h"
#include "pico/sync.h"
static void noop(__unused async_context_t *context) { }
static const async_context_type_t template;
bool async_context_poll_init_with_defaults(async_context_poll_t *self) {
memset(self, 0, sizeof(*self));
self->core.core_num = get_core_num();
self->core.type = &template;
self->core.flags = ASYNC_CONTEXT_FLAG_POLLED | ASYNC_CONTEXT_FLAG_CALLBACK_FROM_NON_IRQ;
sem_init(&self->sem, 1, 1);
return true;
}
static void async_context_poll_wake_up(async_context_t *self_base) {
sem_release(&((async_context_poll_t *)self_base)->sem);
}
static void async_context_poll_requires_update(async_context_t *self_base, async_when_pending_worker_t *worker) {
worker->work_pending = true;
async_context_poll_wake_up(self_base);
}
static void async_context_poll_poll(async_context_t *self_base) {
async_context_base_execute_once(self_base);
}
static void async_context_poll_wait_until(__unused async_context_t *self_base, absolute_time_t until) {
sleep_until(until);
}
static void async_context_poll_wait_for_work_until(async_context_t *self_base, absolute_time_t until) {
absolute_time_t next_time = self_base->next_time;
async_context_poll_t *self = (async_context_poll_t *)self_base;
sem_acquire_block_until(&self->sem, absolute_time_min(next_time, until));
}
static void async_context_poll_lock_check(async_context_t *self_base) {
if (__get_current_exception() || get_core_num() != self_base->core_num) {
panic("async_context_poll context check failed (IRQ or wrong core)");
}
}
uint32_t async_context_poll_execute_sync(__unused async_context_t *context, uint32_t (*func)(void *param), void *param) {
return func(param);
}
static const async_context_type_t template = {
.type = ASYNC_CONTEXT_POLL,
.acquire_lock_blocking = noop,
.release_lock = noop,
.lock_check = async_context_poll_lock_check,
.execute_sync = async_context_poll_execute_sync,
.add_at_time_worker = async_context_base_add_at_time_worker,
.remove_at_time_worker = async_context_base_remove_at_time_worker,
.add_when_pending_worker = async_context_base_add_when_pending_worker,
.remove_when_pending_worker = async_context_base_remove_when_pending_worker,
.set_work_pending = async_context_poll_requires_update,
.poll = async_context_poll_poll,
.wait_until = async_context_poll_wait_until,
.wait_for_work_until = async_context_poll_wait_for_work_until,
.deinit = noop,
};


@@ -0,0 +1,370 @@
/*
* Copyright (c) 2022 Raspberry Pi (Trading) Ltd.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#include <string.h>
#include "pico/async_context_threadsafe_background.h"
#include "pico/async_context_base.h"
#include "pico/sync.h"
#include "hardware/irq.h"
static const async_context_type_t template;
static async_context_threadsafe_background_t *async_contexts_by_user_irq[NUM_USER_IRQS];
static void low_priority_irq_handler(void);
static void process_under_lock(async_context_threadsafe_background_t *self);
static int64_t alarm_handler(alarm_id_t id, void *user_data);
#ifndef ASYNC_CONTEXT_THREADSAFE_BACKGROUND_DEFAULT_LOW_PRIORITY_IRQ_HANDLER_PRIORITY
#define ASYNC_CONTEXT_THREADSAFE_BACKGROUND_DEFAULT_LOW_PRIORITY_IRQ_HANDLER_PRIORITY PICO_LOWEST_IRQ_PRIORITY
#endif
#ifndef ASYNC_CONTEXT_THREADSAFE_BACKGROUND_ALARM_POOL_MAX_ALARMS
#define ASYNC_CONTEXT_THREADSAFE_BACKGROUND_ALARM_POOL_MAX_ALARMS 4
#endif
async_context_threadsafe_background_config_t async_context_threadsafe_background_default_config(void) {
async_context_threadsafe_background_config_t config = {
.low_priority_irq_handler_priority = ASYNC_CONTEXT_THREADSAFE_BACKGROUND_DEFAULT_LOW_PRIORITY_IRQ_HANDLER_PRIORITY,
.custom_alarm_pool = NULL,
};
return config;
}
static inline uint recursive_mutex_enter_count(recursive_mutex_t *mutex) {
return mutex->enter_count;
}
static inline lock_owner_id_t recursive_mutex_owner(recursive_mutex_t *mutex) {
return mutex->owner;
}
static void async_context_threadsafe_background_wake_up(async_context_t *self_base) {
async_context_threadsafe_background_t *self = (async_context_threadsafe_background_t *)self_base;
#if ASYNC_CONTEXT_THREADSAFE_BACKGROUND_MULTI_CORE
if (self_base->core_num == get_core_num()) {
// on same core, can dispatch directly
irq_set_pending(self->low_priority_irq_num);
} else {
// remove the existing alarm (it may have already fired) so we don't overflow the pool with repeats
//
// note that force_alarm_id is not protected here, however if we miss removing one, they will fire
// almost immediately anyway (since they were set in the past)
alarm_id_t force_alarm_id = self->force_alarm_id;
if (force_alarm_id > 0) {
alarm_pool_cancel_alarm(self->alarm_pool, force_alarm_id);
}
// we cause an early timeout (0 is always in the past) on the alarm_pool core
// note that by the time this returns, the timer may already have fired, so we
// may end up setting self->force_alarm_id to a stale timer id, but that is fine as we
// will harmlessly cancel it again next time
self->force_alarm_id = alarm_pool_add_alarm_at_force_in_context(self->alarm_pool, from_us_since_boot(0),
alarm_handler, self);
}
#else
// on same core, can dispatch directly
irq_set_pending(self->low_priority_irq_num);
#endif
sem_release(&self->work_needed_sem);
}
// Prevent background processing in the low-priority IRQ and access by the other core
// These methods may be called from the low-priority IRQ context and on either core
// They can be called recursively
static inline void lock_acquire(async_context_threadsafe_background_t *self) {
// Lock the other core and stop low_prio_irq running
recursive_mutex_enter_blocking(&self->lock_mutex);
}
static void async_context_threadsafe_background_lock_check(async_context_t *self_base) {
async_context_threadsafe_background_t *self = (async_context_threadsafe_background_t *)self_base;
// Lock the other core and stop low_prio_irq running
if (recursive_mutex_enter_count(&self->lock_mutex) < 1 || recursive_mutex_owner(&self->lock_mutex) != lock_get_caller_owner_id()) {
panic_compact("async_context lock_check failed");
}
}
#if ASYNC_CONTEXT_THREADSAFE_BACKGROUND_MULTI_CORE
typedef struct sync_func_call{
async_when_pending_worker_t worker;
semaphore_t sem;
uint32_t (*func)(void *param);
void *param;
uint32_t rc;
} sync_func_call_t;
static void handle_sync_func_call(async_context_t *context, async_when_pending_worker_t *worker) {
sync_func_call_t *call = (sync_func_call_t *)worker;
call->rc = call->func(call->param);
sem_release(&call->sem);
async_context_remove_when_pending_worker(context, worker);
}
#endif
static void lock_release(async_context_threadsafe_background_t *self) {
bool outermost = 1 == recursive_mutex_enter_count(&self->lock_mutex);
// note that it is *not* a requirement to have low_prio_irq_missed handled on the
// same core and in the low-priority irq, as we are only *logically* a single thread. the user
// is already free to call from either core, and this would only happen on a different
// core if the user *themselves* is acquiring the lock from other cores anyway
#if ASYNC_CONTEXT_THREADSAFE_BACKGROUND_MULTI_CORE
bool wake_other_core = false;
#endif
if (outermost) {
// note that we always do a processing on outermost lock exit, to facilitate cases
// like lwIP where we have no notification when lwIP timers are added.
#if ASYNC_CONTEXT_THREADSAFE_BACKGROUND_MULTI_CORE
if (self->core.core_num == get_core_num()) {
process_under_lock(self);
} else if (async_context_base_needs_servicing(&self->core)) {
// have to wake up other core
wake_other_core = true;
}
#else
process_under_lock(self);
#endif
}
recursive_mutex_exit(&self->lock_mutex);
#if ASYNC_CONTEXT_THREADSAFE_BACKGROUND_MULTI_CORE
if (wake_other_core) {
async_context_threadsafe_background_wake_up(&self->core);
}
#endif
}
uint32_t async_context_threadsafe_background_execute_sync(async_context_t *self_base, uint32_t (*func)(void *param), void *param) {
async_context_threadsafe_background_t *self = (async_context_threadsafe_background_t*)self_base;
#if ASYNC_CONTEXT_THREADSAFE_BACKGROUND_MULTI_CORE
if (self_base->core_num != get_core_num()) {
hard_assert(!recursive_mutex_enter_count(&self->lock_mutex));
sync_func_call_t call;
call.worker.do_work = handle_sync_func_call;
call.func = func;
call.param = param;
sem_init(&call.sem, 0, 1);
async_context_add_when_pending_worker(self_base, &call.worker);
async_context_set_work_pending(self_base, &call.worker);
sem_acquire_blocking(&call.sem);
return call.rc;
}
#endif
// short-circuit if we are on the right core
lock_acquire(self);
uint32_t rc = func(param);
lock_release(self);
return rc;
}
static bool low_prio_irq_init(async_context_threadsafe_background_t *self, uint8_t priority) {
assert(get_core_num() == self->core.core_num);
int irq = user_irq_claim_unused(false);
if (irq < 0) return false;
self->low_priority_irq_num = (uint8_t) irq;
uint index = irq - FIRST_USER_IRQ;
assert(index < count_of(async_contexts_by_user_irq));
async_contexts_by_user_irq[index] = self;
irq_set_exclusive_handler(self->low_priority_irq_num, low_priority_irq_handler);
irq_set_enabled(self->low_priority_irq_num, true);
irq_set_priority(self->low_priority_irq_num, priority);
return true;
}
static void low_prio_irq_deinit(async_context_threadsafe_background_t *self) {
if (self->low_priority_irq_num > 0) {
assert(get_core_num() == self->core.core_num);
irq_set_enabled(self->low_priority_irq_num, false);
irq_remove_handler(self->low_priority_irq_num, low_priority_irq_handler);
user_irq_unclaim(self->low_priority_irq_num);
self->low_priority_irq_num = 0;
}
}
static int64_t alarm_handler(__unused alarm_id_t id, void *user_data) {
async_context_threadsafe_background_t *self = (async_context_threadsafe_background_t*)user_data;
#if ASYNC_CONTEXT_THREADSAFE_BACKGROUND_MULTI_CORE
self->force_alarm_id = 0;
#endif
self->alarm_pending = false;
async_context_threadsafe_background_wake_up(&self->core);
return 0;
}
bool async_context_threadsafe_background_init(async_context_threadsafe_background_t *self, async_context_threadsafe_background_config_t *config) {
memset(self, 0, sizeof(*self));
self->core.type = &template;
self->core.flags = ASYNC_CONTEXT_FLAG_CALLBACK_FROM_IRQ | ASYNC_CONTEXT_FLAG_CALLBACK_FROM_NON_IRQ;
self->core.core_num = get_core_num();
if (config->custom_alarm_pool) {
self->alarm_pool = config->custom_alarm_pool;
} else {
#if PICO_TIME_DEFAULT_ALARM_POOL_DISABLED
self->alarm_pool = alarm_pool_create_with_unused_hardware_alarm(ASYNC_CONTEXT_THREADSAFE_BACKGROUND_ALARM_POOL_MAX_ALARMS);
self->alarm_pool_owned = true;
#else
self->alarm_pool = alarm_pool_get_default();
#if ASYNC_CONTEXT_THREADSAFE_BACKGROUND_MULTI_CORE
if (self->core.core_num != alarm_pool_core_num(self->alarm_pool)) {
self->alarm_pool = alarm_pool_create_with_unused_hardware_alarm(ASYNC_CONTEXT_THREADSAFE_BACKGROUND_ALARM_POOL_MAX_ALARMS);
self->alarm_pool_owned = true;
}
#endif
#endif
}
assert(self->core.core_num == alarm_pool_core_num(self->alarm_pool));
sem_init(&self->work_needed_sem, 1, 1);
recursive_mutex_init(&self->lock_mutex);
bool ok = low_prio_irq_init(self, config->low_priority_irq_handler_priority);
return ok;
}
static void async_context_threadsafe_background_set_work_pending(async_context_t *self_base, async_when_pending_worker_t *worker) {
worker->work_pending = true;
async_context_threadsafe_background_wake_up(self_base);
}
static void async_context_threadsafe_background_deinit(async_context_t *self_base) {
async_context_threadsafe_background_t *self = (async_context_threadsafe_background_t *)self_base;
// todo we do not currently handle this correctly; we could, but seems like a rare case
assert(get_core_num() == self_base->core_num);
low_prio_irq_deinit(self);
if (self->alarm_id > 0) alarm_pool_cancel_alarm(self->alarm_pool, self->alarm_id);
#if ASYNC_CONTEXT_THREADSAFE_BACKGROUND_MULTI_CORE
if (self->alarm_pool_owned) {
alarm_pool_destroy(self->alarm_pool);
}
#endif
// acquire the lock to make sure the callback is not running (we have already disabled the IRQ)
recursive_mutex_enter_blocking(&self->lock_mutex);
recursive_mutex_exit(&self->lock_mutex);
memset(self, 0, sizeof(*self));
}
static void process_under_lock(async_context_threadsafe_background_t *self) {
#ifndef NDEBUG
async_context_threadsafe_background_lock_check(&self->core);
assert(self->core.core_num == get_core_num());
#endif
do {
absolute_time_t next_time = async_context_base_execute_once(&self->core);
// if the next wakeup time is in the past then loop
if (absolute_time_diff_us(get_absolute_time(), next_time) <= 0) continue;
// if there is no next wakeup time, we're done
if (is_at_the_end_of_time(next_time)) {
// cancel the alarm early (we will have been called soon after an alarm wakeup), so that
// we don't risk alarm_id collision.
if (self->alarm_id > 0) {
alarm_pool_cancel_alarm(self->alarm_pool, self->alarm_id);
self->alarm_id = 0;
}
break;
}
// the following is an optimization; we are often called much more frequently than timeouts actually change,
// and removing and re-adding the timers has some non-trivial overhead (10s of microseconds); we choose
// to allow the existing timeout to run to completion, and then re-assess from there, unless the new wakeup
// time is before the last one set.
//
// note that alarm_pending is not protected, however, if it is wrong, it is wrong in the sense that it is
// false when it should be true, so if it is wrong due to a race, we will cancel and re-add the alarm which is safe.
if (self->alarm_pending && absolute_time_diff_us(self->last_set_alarm_time, next_time) > 0) break;
// cancel the existing alarm (it may no longer exist)
if (self->alarm_id > 0) alarm_pool_cancel_alarm(self->alarm_pool, self->alarm_id);
self->last_set_alarm_time = next_time;
self->alarm_pending = true;
self->alarm_id = alarm_pool_add_alarm_at(self->alarm_pool, next_time, alarm_handler, self, false);
if (self->alarm_id > 0) break;
self->alarm_pending = false;
} while (true);
}
// Low priority interrupt handler to perform background processing
static void low_priority_irq_handler(void) {
uint index = __get_current_exception() - VTABLE_FIRST_IRQ - FIRST_USER_IRQ;
assert(index < count_of(async_contexts_by_user_irq));
async_context_threadsafe_background_t *self = async_contexts_by_user_irq[index];
if (!self) return;
assert(self->core.core_num == get_core_num());
if (recursive_mutex_try_enter(&self->lock_mutex, NULL)) {
// if the recurse count is not 1 then we have pre-empted something which held the lock on the same core,
// so we cannot do processing here (however processing will be done when that lock is released)
if (recursive_mutex_enter_count(&self->lock_mutex) == 1) {
process_under_lock(self);
}
recursive_mutex_exit(&self->lock_mutex);
}
}
static void async_context_threadsafe_background_wait_until(__unused async_context_t *self_base, absolute_time_t until) {
// can be called in IRQs, in which case we just have to wait
if (__get_current_exception()) {
busy_wait_until(until);
} else {
sleep_until(until);
}
}
static void async_context_threadsafe_background_wait_for_work_until(async_context_t *self_base, absolute_time_t until) {
async_context_threadsafe_background_t *self = (async_context_threadsafe_background_t *)self_base;
sem_acquire_block_until(&self->work_needed_sem, until);
}
static bool async_context_threadsafe_background_add_at_time_worker(async_context_t *self_base, async_at_time_worker_t *worker) {
async_context_threadsafe_background_t *self = (async_context_threadsafe_background_t *)self_base;
lock_acquire(self);
bool rc = async_context_base_add_at_time_worker(self_base, worker);
lock_release(self);
return rc;
}
static bool async_context_threadsafe_background_remove_at_time_worker(async_context_t *self_base, async_at_time_worker_t *worker) {
async_context_threadsafe_background_t *self = (async_context_threadsafe_background_t *)self_base;
lock_acquire(self);
bool rc = async_context_base_remove_at_time_worker(self_base, worker);
lock_release(self);
return rc;
}
static bool async_context_threadsafe_background_add_when_pending_worker(async_context_t *self_base, async_when_pending_worker_t *worker) {
async_context_threadsafe_background_t *self = (async_context_threadsafe_background_t *)self_base;
lock_acquire(self);
bool rc = async_context_base_add_when_pending_worker(self_base, worker);
lock_release(self);
return rc;
}
static bool async_context_threadsafe_background_remove_when_pending_worker(async_context_t *self_base, async_when_pending_worker_t *worker) {
async_context_threadsafe_background_t *self = (async_context_threadsafe_background_t *)self_base;
lock_acquire(self);
bool rc = async_context_base_remove_when_pending_worker(self_base, worker);
lock_release(self);
return rc;
}
static void async_context_threadsafe_background_acquire_lock_blocking(async_context_t *self_base) {
lock_acquire((async_context_threadsafe_background_t *) self_base);
}
static void async_context_threadsafe_background_release_lock(async_context_t *self_base) {
lock_release((async_context_threadsafe_background_t *)self_base);
}
static const async_context_type_t template = {
.type = ASYNC_CONTEXT_THREADSAFE_BACKGROUND,
.acquire_lock_blocking = async_context_threadsafe_background_acquire_lock_blocking,
.release_lock = async_context_threadsafe_background_release_lock,
.lock_check = async_context_threadsafe_background_lock_check,
.execute_sync = async_context_threadsafe_background_execute_sync,
.add_at_time_worker = async_context_threadsafe_background_add_at_time_worker,
.remove_at_time_worker = async_context_threadsafe_background_remove_at_time_worker,
.add_when_pending_worker = async_context_threadsafe_background_add_when_pending_worker,
.remove_when_pending_worker = async_context_threadsafe_background_remove_when_pending_worker,
.set_work_pending = async_context_threadsafe_background_set_work_pending,
.poll = 0,
.wait_until = async_context_threadsafe_background_wait_until,
.wait_for_work_until = async_context_threadsafe_background_wait_for_work_until,
.deinit = async_context_threadsafe_background_deinit,
};


@@ -0,0 +1,471 @@
/*
* Copyright (c) 2022 Raspberry Pi (Trading) Ltd.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
/** \file pico/async_context.h
* \defgroup pico_async_context pico_async_context
*
* \brief An \ref async_context provides a logically single-threaded context for performing work, and responding
* to asynchronous events. Thus an async_context instance is suitable for servicing third-party libraries
* that are not re-entrant.
*
* The "context" in async_context refers to the fact that when calling workers or timeouts within the
* async_context various pre-conditions hold:
*
* <ol>
* <li>That there is a single logical thread of execution; i.e. that the context does not call any worker
* functions concurrently.
* <li>That the context always calls workers from the same processor core, as most uses of async_context rely on interaction
* with IRQs which are themselves core-specific.
* </ol>
*
* The async_context provides two mechanisms for asynchronous work:
*
* * <em>when_pending</em> workers, which are processed whenever they have work pending. See \ref async_context_add_when_pending_worker,
* \ref async_context_remove_when_pending_worker, and \ref async_context_set_work_pending, the latter of which can be used from an interrupt handler
* to signal that servicing work is required to be performed by the worker from the regular async_context.
* <em>at_time</em> workers, which are executed at (or after) a specific time.
*
* Note: "when pending" workers with work pending are executed before "at time" workers.
*
* The async_context provides locking mechanisms, see \ref async_context_acquire_lock_blocking,
* \ref async_context_release_lock and \ref async_context_lock_check which can be used by
* external code to ensure execution of external code does not happen concurrently with worker code.
* Locked code runs on the calling core, however \ref async_context_execute_sync is provided to
* synchronously run a function from the core of the async_context.
*
* The SDK ships with the following default async_contexts:
*
* async_context_poll - this context is not thread-safe, and the user is responsible for calling
* \ref async_context_poll() periodically; async_context_wait_for_work_until() can be used to sleep
* between calls until work is needed if the user has nothing else to do.
*
* async_context_threadsafe_background - in order to work in the background, a low priority IRQ is used
* to handle callbacks. Code is usually invoked from this IRQ context, but may be invoked after any other code
* that uses the async context in another (non-IRQ) context on the same core. Calling \ref async_context_poll() is
* not required, and is a no-op. This context implements async_context locking and is thus safe to call
* from either core, according to the specific notes on each API.
*
* async_context_freertos - Work is performed from a separate "async_context" task, however once again, code may
* also be invoked after a direct use of the async_context on the same core that the async_context belongs to. Calling
* \ref async_context_poll() is not required, and is a no-op. This context implements async_context locking and is thus
* safe to call from any task, and from either core, according to the specific notes on each API.
*
* Each async_context provides bespoke methods of instantiation which are provided in the corresponding headers (e.g.
* async_context_poll.h, async_context_threadsafe_background.h, async_context_freertos.h).
* async_contexts are de-initialized by the common async_context_deinit() method.
*
* Multiple async_context instances can be used by a single application, and they will operate independently.
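*
* \par Example
* A minimal sketch of an "at time" worker that re-adds itself from its own callback so that it
* repeats every 500 ms (the worker body is placeholder user code):
* \code
* static void blink_work(async_context_t *context, async_at_time_worker_t *worker) {
*     // ... do the periodic work here ...
*     async_context_add_at_time_worker_in_ms(context, worker, 500); // re-add to repeat
* }
* static async_at_time_worker_t blink_worker = { .do_work = blink_work };
* \endcode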
*/
#ifndef _PICO_ASYNC_CONTEXT_H
#define _PICO_ASYNC_CONTEXT_H
#include "pico.h"
#include "pico/time.h"
#ifdef __cplusplus
extern "C" {
#endif
enum {
ASYNC_CONTEXT_POLL = 1,
ASYNC_CONTEXT_THREADSAFE_BACKGROUND = 2,
ASYNC_CONTEXT_FREERTOS = 3,
};
typedef struct async_context async_context_t;
/*! \brief A "timeout" instance used by an async_context
* \ingroup pico_async_context
*
* A "timeout" represents some future action that must be taken at a specific time.
* Its methods are called from the async_context under lock at the given time.
*
* \see async_context_add_at_time_worker_at
* \see async_context_add_at_time_worker_in_ms
*/
typedef struct async_work_on_timeout {
/*!
* \brief private linked list pointer
*/
struct async_work_on_timeout *next;
/*!
* \brief Method called when the timeout is reached; may not be NULL
*
* Note that when this method is called, the timeout has been removed from the async_context, so
* if you want the timeout to repeat, you should re-add it during this callback
* @param context
* @param timeout
*/
void (*do_work)(async_context_t *context, struct async_work_on_timeout *timeout);
/*!
* \brief The next timeout time; this should only be modified during the above methods
* or via async_context methods
*/
absolute_time_t next_time;
/*!
* \brief User data associated with the timeout instance
*/
void *user_data;
} async_at_time_worker_t;
/*! \brief A "worker" instance used by an async_context
* \ingroup pico_async_context
*
* A "worker" represents some external entity that must do work in response
* to some external stimulus (usually an IRQ).
* Its methods are called from the async_context under lock when work is pending.
*
* \see async_context_add_when_pending_worker
* \see async_context_set_work_pending
*/
typedef struct async_when_pending_worker {
/*!
* \brief private linked list pointer
*/
struct async_when_pending_worker *next;
/*!
* \brief Called by the async_context when the worker has been marked as having "work pending"
*
* @param context the async_context
* @param worker the worker which has work pending
*/
void (*do_work)(async_context_t *context, struct async_when_pending_worker *worker);
/**
* \brief True if the worker needs do_work to be called
*/
bool work_pending;
/*!
* \brief User data associated with the worker instance
*/
void *user_data;
} async_when_pending_worker_t;
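// Illustrative sketch (not part of this header): a "when pending" worker serviced from an IRQ.
// The IRQ handler only marks the work pending; do_work later runs from the async_context with
// its usual guarantees. my_irq_handler, my_context and the worker body are placeholder user code.
//
//     static void fifo_do_work(async_context_t *context, async_when_pending_worker_t *worker) {
//         // ... drain the hardware FIFO, process data, etc. ...
//     }
//     static async_when_pending_worker_t fifo_worker = { .do_work = fifo_do_work };
//
//     void my_irq_handler(void) {
//         async_context_set_work_pending(my_context, &fifo_worker); // safe from IRQ
//     }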
#define ASYNC_CONTEXT_FLAG_CALLBACK_FROM_NON_IRQ 0x1
#define ASYNC_CONTEXT_FLAG_CALLBACK_FROM_IRQ 0x2
#define ASYNC_CONTEXT_FLAG_POLLED 0x4
/*!
* \brief Implementation of an async_context type, providing methods common to that type
* \ingroup pico_async_context
*/
typedef struct async_context_type {
uint16_t type;
// see wrapper functions for documentation
void (*acquire_lock_blocking)(async_context_t *self);
void (*release_lock)(async_context_t *self);
void (*lock_check)(async_context_t *self);
uint32_t (*execute_sync)(async_context_t *context, uint32_t (*func)(void *param), void *param);
bool (*add_at_time_worker)(async_context_t *self, async_at_time_worker_t *worker);
bool (*remove_at_time_worker)(async_context_t *self, async_at_time_worker_t *worker);
bool (*add_when_pending_worker)(async_context_t *self, async_when_pending_worker_t *worker);
bool (*remove_when_pending_worker)(async_context_t *self, async_when_pending_worker_t *worker);
void (*set_work_pending)(async_context_t *self, async_when_pending_worker_t *worker);
void (*poll)(async_context_t *self); // may be NULL
void (*wait_until)(async_context_t *self, absolute_time_t until);
void (*wait_for_work_until)(async_context_t *self, absolute_time_t until);
void (*deinit)(async_context_t *self);
} async_context_type_t;
/*!
* \brief Base structure type of all async_contexts. For details about its use, see \ref pico_async_context.
* \ingroup pico_async_context
*
Individual async_context_types with additional state should contain this structure at the start.
*/
struct async_context {
const async_context_type_t *type;
async_when_pending_worker_t *when_pending_list;
async_at_time_worker_t *at_time_list;
absolute_time_t next_time;
uint16_t flags;
uint8_t core_num;
};
/*!
* \brief Acquire the async_context lock
* \ingroup pico_async_context
*
* The owner of the async_context lock is the logical owner of the async_context
* and other work related to this async_context will not happen concurrently.
*
* This method may be called in a nested fashion by the lock owner.
*
* \note the async_context lock is nestable by the same caller, so an internal count is maintained
*
* \note for async_contexts that provide locking (not async_context_poll), this method is threadsafe, and may be called from within any
* worker method called by the async_context or from any other non-IRQ context.
*
* \param context the async_context
*
* \see async_context_release_lock
*/
static inline void async_context_acquire_lock_blocking(async_context_t *context) {
context->type->acquire_lock_blocking(context);
}
/*!
* \brief Release the async_context lock
* \ingroup pico_async_context
*
* \note the async_context lock may be acquired in a nested fashion, so an internal count is maintained. When the
* outermost lock is released, a check is made for work which might have been skipped while the lock was held,
* and any such work may be performed during this call IF the call is made from the same core that the async_context belongs to.
*
* \note for async_contexts that provide locking (not async_context_poll), this method is threadsafe, and may be called from within any
* worker method called by the async_context or from any other non-IRQ context.
*
* \param context the async_context
*
* \see async_context_acquire_lock_blocking
*/
static inline void async_context_release_lock(async_context_t *context) {
context->type->release_lock(context);
}
/*!
* \brief Assert if the caller does not own the lock for the async_context
* \ingroup pico_async_context
* \note this method is thread-safe
*
* \param context the async_context
*/
static inline void async_context_lock_check(async_context_t *context) {
context->type->lock_check(context);
}
/*!
* \brief Execute work synchronously on the core the async_context belongs to.
* \ingroup pico_async_context
*
* This method is intended for code external to the async_context (e.g. another thread/task) to
* execute a function with the same guarantees (single core, logical thread of execution) that
* async_context workers are called with.
*
* \note you should NOT call this method while holding the async_context's lock
*
* \param context the async_context
* \param func the function to call
* \param param the parameter to pass to the function
* \return the return value from func
*/
static inline uint32_t async_context_execute_sync(async_context_t *context, uint32_t (*func)(void *param), void *param) {
return context->type->execute_sync(context, func, param);
}
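// Illustrative sketch (placeholder user code): running a function on the async_context's core
// from another task/core and collecting its return value; my_stats_t is a hypothetical user type.
//
//     static uint32_t read_count(void *param) {
//         // runs on the async_context core, never concurrently with workers
//         return ((my_stats_t *)param)->count;
//     }
//     uint32_t count = async_context_execute_sync(context, read_count, &stats);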
/*!
* \brief Add an "at time" worker to a context
* \ingroup pico_async_context
*
* An "at time" worker will run at or after a specific point in time, and is automatically when (just before) it runs.
*
* The time to fire is specified in the next_time field of the worker.
*
* \note for async_contexts that provide locking (not async_context_poll), this method is threadsafe, and may be called from within any
* worker method called by the async_context or from any other non-IRQ context.
*
* \param context the async_context
* \param worker the "at time" worker to add
* \return true if the worker was added, false if the worker was already present.
*/
static inline bool async_context_add_at_time_worker(async_context_t *context, async_at_time_worker_t *worker) {
return context->type->add_at_time_worker(context, worker);
}
/*!
* \brief Add an "at time" worker to a context
* \ingroup pico_async_context
*
* An "at time" worker will run at or after a specific point in time, and is automatically when (just before) it runs.
*
* The time to fire is specified by the at parameter.
*
* \note for async_contexts that provide locking (not async_context_poll), this method is threadsafe, and may be called from within any
* worker method called by the async_context or from any other non-IRQ context.
*
* \param context the async_context
* \param worker the "at time" worker to add
* \param at the time to fire at
* \return true if the worker was added, false if the worker was already present.
*/
static inline bool async_context_add_at_time_worker_at(async_context_t *context, async_at_time_worker_t *worker, absolute_time_t at) {
worker->next_time = at;
return context->type->add_at_time_worker(context, worker);
}
/*!
* \brief Add an "at time" worker to a context
* \ingroup pico_async_context
*
* An "at time" worker will run at or after a specific point in time, and is automatically when (just before) it runs.
*
* The time to fire is specified by a delay via the ms parameter
*
* \note for async_contexts that provide locking (not async_context_poll), this method is threadsafe, and may be called from within any
* worker method called by the async_context or from any other non-IRQ context.
*
* \param context the async_context
* \param worker the "at time" worker to add
* \param ms the number of milliseconds from now to fire after
* \return true if the worker was added, false if the worker was already present.
*/
static inline bool async_context_add_at_time_worker_in_ms(async_context_t *context, async_at_time_worker_t *worker, uint32_t ms) {
worker->next_time = make_timeout_time_ms(ms);
return context->type->add_at_time_worker(context, worker);
}
/*!
* \brief Remove an "at time" worker from a context
* \ingroup pico_async_context
*
* \note for async_contexts that provide locking (not async_context_poll), this method is threadsafe, and may be called from within any
* worker method called by the async_context or from any other non-IRQ context.
*
* \param context the async_context
* \param worker the "at time" worker to remove
* \return true if the worker was removed, false if the instance was not present.
*/
static inline bool async_context_remove_at_time_worker(async_context_t *context, async_at_time_worker_t *worker) {
return context->type->remove_at_time_worker(context, worker);
}
/*!
* \brief Add a "when pending" worker to a context
* \ingroup pico_async_context
*
* An "when pending" worker will run when it is pending (can be set via \ref async_context_set_work_pending), and
* is NOT automatically removed when it runs.
*
* The time to fire is specified by a delay via the ms parameter
*
* \note for async_contexts that provide locking (not async_context_poll), this method is threadsafe, and may be called from within any
* worker method called by the async_context or from any other non-IRQ context.
*
* \param context the async_context
* \param worker the "when pending" worker to add
* \return true if the worker was added, false if the worker was already present.
*/
static inline bool async_context_add_when_pending_worker(async_context_t *context, async_when_pending_worker_t *worker) {
return context->type->add_when_pending_worker(context, worker);
}
/*!
* \brief Remove a "when pending" worker from a context
* \ingroup pico_async_context
*
* \note for async_contexts that provide locking (not async_context_poll), this method is threadsafe, and may be called from within any
* worker method called by the async_context or from any other non-IRQ context.
*
* \param context the async_context
* \param worker the "when pending" worker to remove
* \return true if the worker was removed, false if the instance was not present.
*/
static inline bool async_context_remove_when_pending_worker(async_context_t *context, async_when_pending_worker_t *worker) {
return context->type->remove_when_pending_worker(context, worker);
}
/*!
* \brief Mark a "when pending" worker as having work pending
* \ingroup pico_async_context
*
* The worker will be run from the async_context at a later time.
*
* \note this method may be called from any context including IRQs
*
* \param context the async_context
* \param worker the "when pending" worker to mark as pending.
*/
static inline void async_context_set_work_pending(async_context_t *context, async_when_pending_worker_t *worker) {
context->type->set_work_pending(context, worker);
}
/*!
* \brief Perform any pending work for polling style async_context
* \ingroup pico_async_context
*
* For a polled async_context (e.g. \ref async_context_poll) the user is responsible for calling this method
* periodically to perform any required work.
*
* This method may immediately perform outstanding work on other context types, but is not required to.
*
* \param context the async_context
*/
static inline void async_context_poll(async_context_t *context) {
if (context->type->poll) context->type->poll(context);
}
/*!
* \brief sleep until the specified time in an async_context callback-safe way
* \ingroup pico_async_context
*
* \note for async_contexts that provide locking (not async_context_poll), this method is threadsafe, and may be called from within any
* worker method called by the async_context or from any other non-IRQ context.
*
* \param context the async_context
* \param until the time to sleep until
*/
static inline void async_context_wait_until(async_context_t *context, absolute_time_t until) {
context->type->wait_until(context, until);
}
/*!
* \brief Block until work needs to be done or the specified time has been reached
* \ingroup pico_async_context
*
* \note this method should not be called from a worker callback
*
* \param context the async_context
* \param until the time to return at if no work is required
*/
static inline void async_context_wait_for_work_until(async_context_t *context, absolute_time_t until) {
context->type->wait_for_work_until(context, until);
}
/*!
* \brief Block until work needs to be done or the specified number of milliseconds have passed
* \ingroup pico_async_context
*
* \note this method should not be called from a worker callback
*
* \param context the async_context
* \param ms the number of milliseconds to return after if no work is required
*/
static inline void async_context_wait_for_work_ms(async_context_t *context, uint32_t ms) {
async_context_wait_for_work_until(context, make_timeout_time_ms(ms));
}
/*!
* \brief Return the processor core this async_context belongs to
* \ingroup pico_async_context
*
* \param context the async_context
* \return the physical core number
*/
static inline uint async_context_core_num(const async_context_t *context) {
return context->core_num;
}
/*!
* \brief End async_context processing, and free any resources
* \ingroup pico_async_context
*
* Note the user should clean up any resources associated with workers
* in the async_context themselves.
*
* Asynchronous (non-polled) async_contexts guarantee that no
* callback is being called once this method returns.
*
* \param context the async_context
*/
static inline void async_context_deinit(async_context_t *context) {
context->type->deinit(context);
}
#ifdef __cplusplus
}
#endif
#endif


@@ -0,0 +1,33 @@
/*
* Copyright (c) 2022 Raspberry Pi (Trading) Ltd.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#ifndef _PICO_ASYNC_CONTEXT_BASE_H
#define _PICO_ASYNC_CONTEXT_BASE_H
#include "pico/async_context.h"
#ifdef __cplusplus
extern "C" {
#endif
// common functions for async_context implementations to use
bool async_context_base_add_at_time_worker(async_context_t *self, async_at_time_worker_t *worker);
bool async_context_base_remove_at_time_worker(async_context_t *self, async_at_time_worker_t *worker);
bool async_context_base_add_when_pending_worker(async_context_t *self, async_when_pending_worker_t *worker);
bool async_context_base_remove_when_pending_worker(async_context_t *self, async_when_pending_worker_t *worker);
async_at_time_worker_t *async_context_base_remove_ready_at_time_worker(async_context_t *self);
void async_context_base_refresh_next_timeout(async_context_t *self);
absolute_time_t async_context_base_execute_once(async_context_t *self);
bool async_context_base_needs_servicing(async_context_t *self);
#ifdef __cplusplus
}
#endif
#endif


@@ -0,0 +1,129 @@
/*
* Copyright (c) 2022 Raspberry Pi (Trading) Ltd.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#ifndef _PICO_ASYNC_CONTEXT_FREERTOS_H
#define _PICO_ASYNC_CONTEXT_FREERTOS_H
/** \file pico/async_context_freertos.h
* \defgroup async_context_freertos async_context_freertos
* \ingroup pico_async_context
*
* \brief async_context_freertos provides an implementation of \ref async_context that handles asynchronous
* work in a separate FreeRTOS task.
*/
#include "pico/async_context.h"
// FreeRTOS includes
#include "FreeRTOS.h"
#include "semphr.h"
#include "timers.h"
#ifdef __cplusplus
extern "C" {
#endif
#ifndef ASYNC_CONTEXT_DEFAULT_FREERTOS_TASK_PRIORITY
#define ASYNC_CONTEXT_DEFAULT_FREERTOS_TASK_PRIORITY (tskIDLE_PRIORITY + 4)
#endif
#ifndef ASYNC_CONTEXT_DEFAULT_FREERTOS_TASK_STACK_SIZE
#define ASYNC_CONTEXT_DEFAULT_FREERTOS_TASK_STACK_SIZE configMINIMAL_STACK_SIZE
#endif
typedef struct async_context_freertos async_context_freertos_t;
#if !defined(configNUMBER_OF_CORES) && defined(configNUM_CORES)
#if !portSUPPORT_SMP
#error configNUMBER_OF_CORES is the new name for configNUM_CORES
#else
// portSUPPORT_SMP was defined in old smp branch
#error configNUMBER_OF_CORES is the new name for configNUM_CORES, however it looks like you may need to define both as you are using an old SMP branch of FreeRTOS
#endif
#endif
/**
* \brief Configuration object for async_context_freertos instances.
*/
typedef struct async_context_freertos_config {
/**
* \brief Task priority for the async_context task
*/
UBaseType_t task_priority;
/**
* \brief Stack size for the async_context task
*/
configSTACK_DEPTH_TYPE task_stack_size;
/**
* \brief the core ID (see \ref portGET_CORE_ID()) to pin the task to.
* This is only relevant in SMP mode.
*/
#if configUSE_CORE_AFFINITY && configNUMBER_OF_CORES > 1
UBaseType_t task_core_id;
#endif
} async_context_freertos_config_t;
struct async_context_freertos {
async_context_t core;
SemaphoreHandle_t lock_mutex;
SemaphoreHandle_t work_needed_sem;
TimerHandle_t timer_handle;
TaskHandle_t task_handle;
uint8_t nesting;
volatile bool task_should_exit;
};
/*!
* \brief Initialize an async_context_freertos instance using the specified configuration
* \ingroup async_context_freertos
*
* If this method succeeds (returns true), then the async_context is available for use
* and can be de-initialized by calling async_context_deinit().
*
* \param self a pointer to async_context_freertos structure to initialize
* \param config the configuration object specifying characteristics for the async_context
* \return true if initialization is successful, false otherwise
*/
bool async_context_freertos_init(async_context_freertos_t *self, async_context_freertos_config_t *config);
/*!
* \brief Return a copy of the default configuration object used by \ref async_context_freertos_init_with_defaults()
* \ingroup async_context_freertos
*
* The caller can then modify just the settings it cares about, and call \ref async_context_freertos_init()
* \return the default configuration object
*/
static inline async_context_freertos_config_t async_context_freertos_default_config(void) {
async_context_freertos_config_t config = {
.task_priority = ASYNC_CONTEXT_DEFAULT_FREERTOS_TASK_PRIORITY,
.task_stack_size = ASYNC_CONTEXT_DEFAULT_FREERTOS_TASK_STACK_SIZE,
#if configUSE_CORE_AFFINITY && configNUMBER_OF_CORES > 1
.task_core_id = (UBaseType_t)-1, // none
#endif
};
return config;
}
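// Illustrative usage (placeholder application code): take the default configuration, override
// just the fields of interest, then initialize; tskIDLE_PRIORITY and configMINIMAL_STACK_SIZE
// come from FreeRTOS.
//
//     async_context_freertos_t ctx;
//     async_context_freertos_config_t cfg = async_context_freertos_default_config();
//     cfg.task_priority = tskIDLE_PRIORITY + 2;
//     cfg.task_stack_size = configMINIMAL_STACK_SIZE * 2;
//     if (!async_context_freertos_init(&ctx, &cfg)) {
//         // handle failure
//     }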
/*!
* \brief Initialize an async_context_freertos instance with default values
* \ingroup async_context_freertos
*
* If this method succeeds (returns true), then the async_context is available for use
* and can be de-initialized by calling async_context_deinit().
*
* \param self a pointer to async_context_freertos structure to initialize
* \return true if initialization is successful, false otherwise
*/
static inline bool async_context_freertos_init_with_defaults(async_context_freertos_t *self) {
async_context_freertos_config_t config = async_context_freertos_default_config();
return async_context_freertos_init(self, &config);
}
#ifdef __cplusplus
}
#endif
#endif


@@ -0,0 +1,49 @@
/*
* Copyright (c) 2022 Raspberry Pi (Trading) Ltd.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#ifndef _PICO_ASYNC_CONTEXT_POLL_H
#define _PICO_ASYNC_CONTEXT_POLL_H
/** \file pico/async_context_poll.h
* \defgroup async_context_poll async_context_poll
* \ingroup pico_async_context
*
* \brief async_context_poll provides an implementation of \ref async_context that is intended for use with a simple
* polling loop on one core. It is not thread safe.
*
* The \ref async_context_poll() method must be called periodically to handle asynchronous work that may now be
* pending. \ref async_context_wait_for_work_until() may be used to block a polling loop until there is work to do,
* and prevent tight spinning.
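*
* \par Example
* A minimal polling-loop sketch (worker registration elided):
* \code
* async_context_poll_t ctx;
* async_context_poll_init_with_defaults(&ctx);
* // ... add workers to &ctx.core ...
* for (;;) {
*     async_context_poll(&ctx.core);
*     async_context_wait_for_work_ms(&ctx.core, 1000); // sleep until work, or 1s
* }
* \endcode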
*/
#include "pico/async_context.h"
#include "pico/sem.h"
#ifdef __cplusplus
extern "C" {
#endif
typedef struct async_context_poll {
async_context_t core;
semaphore_t sem;
} async_context_poll_t;
/*!
* \brief Initialize an async_context_poll instance with default values
* \ingroup async_context_poll
*
* If this method succeeds (returns true), then the async_context is available for use
* and can be de-initialized by calling async_context_deinit().
*
* \param self a pointer to async_context_poll structure to initialize
* \return true if initialization is successful, false otherwise
*/
bool async_context_poll_init_with_defaults(async_context_poll_t *self);
#ifdef __cplusplus
}
#endif
#endif


@@ -0,0 +1,112 @@
/*
* Copyright (c) 2022 Raspberry Pi (Trading) Ltd.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#ifndef _PICO_ASYNC_CONTEXT_THREADSAFE_BACKGROUND_H
#define _PICO_ASYNC_CONTEXT_THREADSAFE_BACKGROUND_H
/** \file pico/async_context_threadsafe_background.h
* \defgroup async_context_threadsafe_background async_context_threadsafe_background
* \ingroup pico_async_context
*
* \brief async_context_threadsafe_background provides an implementation of \ref async_context that handles asynchronous
* work in a low priority IRQ; there is no need for the user to poll for work.
*
* \note The workers used with this async_context MUST be safe to call from an IRQ.
*/
#include "pico/async_context.h"
#include "pico/sem.h"
#include "pico/mutex.h"
#include "hardware/irq.h"
#ifdef __cplusplus
extern "C" {
#endif
#ifndef ASYNC_CONTEXT_THREADSAFE_BACKGROUND_MULTI_CORE
#define ASYNC_CONTEXT_THREADSAFE_BACKGROUND_MULTI_CORE LIB_PICO_MULTICORE
#endif
typedef struct async_context_threadsafe_background async_context_threadsafe_background_t;
/**
* \brief Configuration object for async_context_threadsafe_background instances.
*/
typedef struct async_context_threadsafe_background_config {
/**
* \brief the priority of the low priority IRQ
*/
uint8_t low_priority_irq_handler_priority;
/**
* \brief a specific alarm pool to use (or NULL to use a default)
*
* \note this alarm pool MUST be on the same core as the async_context
*
* The default is the "default alarm pool" (see \ref alarm_pool_get_default())
* if it is available and on the same core; otherwise a private alarm_pool
* instance is created during initialization.
*/
alarm_pool_t *custom_alarm_pool;
} async_context_threadsafe_background_config_t;
struct async_context_threadsafe_background {
async_context_t core;
alarm_pool_t *alarm_pool; // this must be on the same core as core_num
absolute_time_t last_set_alarm_time;
recursive_mutex_t lock_mutex;
semaphore_t work_needed_sem;
volatile alarm_id_t alarm_id;
#if ASYNC_CONTEXT_THREADSAFE_BACKGROUND_MULTI_CORE
volatile alarm_id_t force_alarm_id;
bool alarm_pool_owned;
#endif
uint8_t low_priority_irq_num;
volatile bool alarm_pending;
};
/*!
* \brief Initialize an async_context_threadsafe_background instance using the specified configuration
* \ingroup async_context_threadsafe_background
*
* If this method succeeds (returns true), then the async_context is available for use
* and can be de-initialized by calling async_context_deinit().
*
* \param self a pointer to async_context_threadsafe_background structure to initialize
* \param config the configuration object specifying characteristics for the async_context
* \return true if initialization is successful, false otherwise
*/
bool async_context_threadsafe_background_init(async_context_threadsafe_background_t *self, async_context_threadsafe_background_config_t *config);
/*!
* \brief Return a copy of the default configuration object used by \ref async_context_threadsafe_background_init_with_defaults()
* \ingroup async_context_threadsafe_background
*
* The caller can then modify just the settings it cares about, and call \ref async_context_threadsafe_background_init()
* \return the default configuration object
*/
async_context_threadsafe_background_config_t async_context_threadsafe_background_default_config(void);
/*!
* \brief Initialize an async_context_threadsafe_background instance with default values
* \ingroup async_context_threadsafe_background
*
* If this method succeeds (returns true), then the async_context is available for use
* and can be de-initialized by calling async_context_deinit().
*
* \param self a pointer to async_context_threadsafe_background structure to initialize
* \return true if initialization is successful, false otherwise
*/
static inline bool async_context_threadsafe_background_init_with_defaults(async_context_threadsafe_background_t *self) {
async_context_threadsafe_background_config_t config = async_context_threadsafe_background_default_config();
return async_context_threadsafe_background_init(self, &config);
}
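// Illustrative usage (placeholder application code): background servicing with no polling
// required; workers added to this context must be IRQ-safe, as noted above.
//
//     static async_context_threadsafe_background_t ctx;
//     if (async_context_threadsafe_background_init_with_defaults(&ctx)) {
//         // ... add IRQ-safe workers to &ctx.core ...
//     }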
#ifdef __cplusplus
}
#endif
#endif