
Adding RP2350 SDK and target framework (#13988)

* Adding RP2350 SDK and target framework

* Spacing

* Removing board definitions
J Blackman 2024-10-23 10:02:48 +11:00 committed by GitHub
parent 462cb05930
commit 2dd6f95aad
576 changed files with 435012 additions and 0 deletions

@@ -0,0 +1,25 @@
/*
* Copyright (c) 2020 Raspberry Pi (Trading) Ltd.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#include "pico/critical_section.h"
#if PICO_32BIT
static_assert(sizeof(critical_section_t) == 8, "");
#endif
void critical_section_init(critical_section_t *crit_sec) {
critical_section_init_with_lock_num(crit_sec, (uint)spin_lock_claim_unused(true));
}
void critical_section_init_with_lock_num(critical_section_t *crit_sec, uint lock_num) {
crit_sec->spin_lock = spin_lock_instance(lock_num);
__mem_fence_release();
}
void critical_section_deinit(critical_section_t *crit_sec) {
spin_lock_unclaim(spin_lock_get_num(crit_sec->spin_lock));
crit_sec->spin_lock = NULL;
}

@@ -0,0 +1,98 @@
/*
* Copyright (c) 2020 Raspberry Pi (Trading) Ltd.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#ifndef _PICO_CRITICAL_SECTION_H
#define _PICO_CRITICAL_SECTION_H
#include "pico/lock_core.h"
#ifdef __cplusplus
extern "C" {
#endif
/** \file critical_section.h
* \defgroup critical_section critical_section
* \ingroup pico_sync
* \brief Critical Section API for short-lived mutual exclusion safe for IRQ and multi-core
*
 * A critical section is non-reentrant, and provides mutual exclusion against both the other core and
 * (higher priority) interrupts on the same core. It does the former using a spin lock and the latter
 * by disabling interrupts on the calling core.
*
* Because interrupts are disabled when a critical_section is owned, uses of the critical_section
* should be as short as possible.
*/
typedef struct __packed_aligned critical_section {
spin_lock_t *spin_lock;
uint32_t save;
} critical_section_t;
/*! \brief Initialise a critical_section structure allowing the system to assign a spin lock number
* \ingroup critical_section
*
* The critical section is initialized ready for use, and will use a (possibly shared) spin lock
* number assigned by the system. Note that in general it is unlikely that you would be nesting
* critical sections, however if you do so you *must* use \ref critical_section_init_with_lock_num
* to ensure that the spin locks used are different.
*
* \param crit_sec Pointer to critical_section structure
*/
void critical_section_init(critical_section_t *crit_sec);
/*! \brief Initialise a critical_section structure assigning a specific spin lock number
* \ingroup critical_section
* \param crit_sec Pointer to critical_section structure
* \param lock_num the specific spin lock number to use
*/
void critical_section_init_with_lock_num(critical_section_t *crit_sec, uint lock_num);
/*! \brief Enter a critical_section
* \ingroup critical_section
*
* If the spin lock associated with this critical section is in use, then this
* method will block until it is released.
*
* \param crit_sec Pointer to critical_section structure
*/
static inline void critical_section_enter_blocking(critical_section_t *crit_sec) {
crit_sec->save = spin_lock_blocking(crit_sec->spin_lock);
}
/*! \brief Release a critical_section
* \ingroup critical_section
*
* \param crit_sec Pointer to critical_section structure
*/
static inline void critical_section_exit(critical_section_t *crit_sec) {
spin_unlock(crit_sec->spin_lock, crit_sec->save);
}
/*! \brief De-Initialise a critical_section created by the critical_section_init method
* \ingroup critical_section
*
* This method is only used to free the associated spin lock allocated via
* the critical_section_init method (it should not be used to de-initialize a spin lock
* created via critical_section_init_with_lock_num). After this call, the critical section is invalid
*
* \param crit_sec Pointer to critical_section structure
*/
void critical_section_deinit(critical_section_t *crit_sec);
/*! \brief Test whether a critical_section has been initialized
 * \ingroup critical_section
*
* \param crit_sec Pointer to critical_section structure
* \return true if the critical section is initialized, false otherwise
*/
static inline bool critical_section_is_initialized(critical_section_t *crit_sec) {
return crit_sec->spin_lock != 0;
}
#ifdef __cplusplus
}
#endif
#endif
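
For orientation, here is a minimal usage sketch of the critical_section API above (not part of this diff; `shared_counter` and `my_irq_handler` are illustrative names):

```c
#include "pico/critical_section.h"

static critical_section_t counter_cs;
static volatile uint32_t shared_counter;

// hypothetical IRQ handler: safe because the critical section also masks IRQs on this core
void my_irq_handler(void) {
    critical_section_enter_blocking(&counter_cs);
    shared_counter++;
    critical_section_exit(&counter_cs);
}

int main(void) {
    critical_section_init(&counter_cs);   // the system assigns a spin lock number
    for (;;) {
        critical_section_enter_blocking(&counter_cs);
        uint32_t snapshot = shared_counter;  // keep the protected region short
        critical_section_exit(&counter_cs);
        (void)snapshot;
    }
}
```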

@@ -0,0 +1,199 @@
/*
* Copyright (c) 2020 Raspberry Pi (Trading) Ltd.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#ifndef _PICO_LOCK_CORE_H
#define _PICO_LOCK_CORE_H
#include "pico.h"
#include "pico/time.h"
#include "hardware/sync.h"
/** \file lock_core.h
* \defgroup lock_core lock_core
* \ingroup pico_sync
* \brief base synchronization/lock primitive support.
*
* Most of the pico_sync locking primitives contain a lock_core_t structure member. This currently just holds a spin
* lock which is used only to protect the contents of the rest of the structure as part of implementing the synchronization
 * primitive. As such, the spin_lock member of lock_core is never still held on return from any function for the primitive.
*
* \ref critical_section is an exceptional case in that it does not have a lock_core_t and simply wraps a spin lock, providing
* methods to lock and unlock said spin lock.
*
* lock_core based structures work by locking the spin lock, checking state, and then deciding whether they additionally need to block
* or notify when the spin lock is released. In the blocking case, they will wake up again in the future, and try the process again.
*
 * By default the SDK just uses the processors' events via SEV and WFE for notification and blocking as these are sufficient for
* cross core, and notification from interrupt handlers. However macros are defined in this file that abstract the wait
* and notify mechanisms to allow the SDK locking functions to effectively be used within an RTOS or other environment.
*
* When implementing an RTOS, it is desirable for the SDK synchronization primitives that wait, to block the calling task (and immediately yield),
* and those that notify, to wake a blocked task which isn't on processor. At least the wait macro implementation needs to be atomic with the protecting
* spin_lock unlock from the callers point of view; i.e. the task should unlock the spin lock when it starts its wait. Such implementation is
* up to the RTOS integration, however the macros are defined such that such operations are always combined into a single call
* (so they can be performed atomically) even though the default implementation does not need this, as a WFE which starts
* following the corresponding SEV is not missed.
*/
// PICO_CONFIG: PARAM_ASSERTIONS_ENABLED_LOCK_CORE, Enable/disable assertions in the lock core, type=bool, default=0, group=pico_sync
#ifndef PARAM_ASSERTIONS_ENABLED_LOCK_CORE
#define PARAM_ASSERTIONS_ENABLED_LOCK_CORE 0
#endif
/** \file lock_core.h
* \ingroup lock_core
*
* Base implementation for locking primitives protected by a spin lock. The spin lock is only used to protect
* access to the remaining lock state (in primitives using lock_core); it is never left locked outside
* of the function implementations
*/
struct lock_core {
// spin lock protecting this lock's state
spin_lock_t *spin_lock;
// note any lock members in containing structures need not be volatile;
// they are protected by memory/compiler barriers when gaining and release spin locks
};
typedef struct lock_core lock_core_t;
/*! \brief Initialise a lock structure
* \ingroup lock_core
*
 * Initialize a lock structure, providing the spin lock number to use for protecting internal state.
*
* \param core Pointer to the lock_core to initialize
* \param lock_num Spin lock number to use for the lock. As the spin lock is only used internally to the locking primitive
 *                 method implementations, this does not need to be globally unique; however, a lock number shared with other locks may suffer contention
*/
void lock_init(lock_core_t *core, uint lock_num);
#ifndef lock_owner_id_t
/*! \brief type to use to store the 'owner' of a lock.
* \ingroup lock_core
*
* By default this is int8_t as it only needs to store the core number or -1, however it may be
* overridden if a larger type is required (e.g. for an RTOS task id)
*/
#define lock_owner_id_t int8_t
#endif
#ifndef LOCK_INVALID_OWNER_ID
/*! \brief marker value to use for a lock_owner_id_t which does not refer to any valid owner
* \ingroup lock_core
*/
#define LOCK_INVALID_OWNER_ID ((lock_owner_id_t)-1)
#endif
#ifndef lock_get_caller_owner_id
/*! \brief return the owner id for the caller
* \ingroup lock_core
*
* By default this returns the calling core number, but may be overridden (e.g. to return an RTOS task id)
*/
#define lock_get_caller_owner_id() ((lock_owner_id_t)get_core_num())
#ifndef lock_is_owner_id_valid
#define lock_is_owner_id_valid(id) ((id)>=0)
#endif
#endif
#ifndef lock_is_owner_id_valid
#define lock_is_owner_id_valid(id) ((id) != LOCK_INVALID_OWNER_ID)
#endif
#ifndef lock_internal_spin_unlock_with_wait
/*! \brief Atomically unlock the lock's spin lock, and wait for a notification.
* \ingroup lock_core
*
* _Atomic_ here refers to the fact that it should not be possible for a concurrent lock_internal_spin_unlock_with_notify
* to insert itself between the spin unlock and this wait in a way that the wait does not see the notification (i.e. causing
* a missed notification). In other words this method should always wake up in response to a lock_internal_spin_unlock_with_notify
* for the same lock, which completes after this call starts.
*
* In an ideal implementation, this method would return exactly after the corresponding lock_internal_spin_unlock_with_notify
* has subsequently been called on the same lock instance, however this method is free to return at _any_ point before that;
* this macro is _always_ used in a loop which locks the spin lock, checks the internal locking primitive state and then
* waits again if the calling thread should not proceed.
*
* By default this macro simply unlocks the spin lock, and then performs a WFE, but may be overridden
* (e.g. to actually block the RTOS task).
*
* \param lock the lock_core for the primitive which needs to block
* \param save the uint32_t value that should be passed to spin_unlock when the spin lock is unlocked. (i.e. the `PRIMASK`
 *             state when the spin lock was acquired)
*/
#define lock_internal_spin_unlock_with_wait(lock, save) spin_unlock((lock)->spin_lock, save), __wfe()
#endif
#ifndef lock_internal_spin_unlock_with_notify
/*! \brief Atomically unlock the lock's spin lock, and send a notification
* \ingroup lock_core
*
* _Atomic_ here refers to the fact that it should not be possible for this notification to happen during a
* lock_internal_spin_unlock_with_wait in a way that that wait does not see the notification (i.e. causing
* a missed notification). In other words this method should always wake up any lock_internal_spin_unlock_with_wait
* which started before this call completes.
*
* In an ideal implementation, this method would wake up only the corresponding lock_internal_spin_unlock_with_wait
* that has been called on the same lock instance, however it is free to wake up any of them, as they will check
 * their condition and then re-wait if necessary.
*
* By default this macro simply unlocks the spin lock, and then performs a SEV, but may be overridden
* (e.g. to actually un-block RTOS task(s)).
*
* \param lock the lock_core for the primitive which needs to block
* \param save the uint32_t value that should be passed to spin_unlock when the spin lock is unlocked. (i.e. the PRIMASK
 *             state when the spin lock was acquired)
*/
#define lock_internal_spin_unlock_with_notify(lock, save) spin_unlock((lock)->spin_lock, save), __sev()
#endif
#ifndef lock_internal_spin_unlock_with_best_effort_wait_or_timeout
/*! \brief Atomically unlock the lock's spin lock, and wait for a notification or a timeout
* \ingroup lock_core
*
* _Atomic_ here refers to the fact that it should not be possible for a concurrent lock_internal_spin_unlock_with_notify
* to insert itself between the spin unlock and this wait in a way that the wait does not see the notification (i.e. causing
* a missed notification). In other words this method should always wake up in response to a lock_internal_spin_unlock_with_notify
* for the same lock, which completes after this call starts.
*
* In an ideal implementation, this method would return exactly after the corresponding lock_internal_spin_unlock_with_notify
* has subsequently been called on the same lock instance or the timeout has been reached, however this method is free to return
* at _any_ point before that; this macro is _always_ used in a loop which locks the spin lock, checks the internal locking
* primitive state and then waits again if the calling thread should not proceed.
*
* By default this simply unlocks the spin lock, and then calls \ref best_effort_wfe_or_timeout
* but may be overridden (e.g. to actually block the RTOS task with a timeout).
*
* \param lock the lock_core for the primitive which needs to block
* \param save the uint32_t value that should be passed to spin_unlock when the spin lock is unlocked. (i.e. the PRIMASK
 *             state when the spin lock was acquired)
* \param until the \ref absolute_time_t value
* \return true if the timeout has been reached
*/
#define lock_internal_spin_unlock_with_best_effort_wait_or_timeout(lock, save, until) ({ \
spin_unlock((lock)->spin_lock, save); \
best_effort_wfe_or_timeout(until); \
})
#endif
#ifndef sync_internal_yield_until_before
/*! \brief yield to other processing until some time before the requested time
* \ingroup lock_core
*
* This method is provided for cases where the caller has no useful work to do
* until the specified time.
*
* By default this method does nothing, however it can be overridden (for example by an
* RTOS which is able to block the current task until the scheduler tick before
* the given time)
*
* \param until the \ref absolute_time_t value
*/
#define sync_internal_yield_until_before(until) ((void)0)
#endif
#endif
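
To make the lock -> check state -> wait/notify pattern described above concrete, here is a hypothetical one-shot event primitive built on lock_core_t (a sketch only, not part of this diff; `event_t` and its functions are invented names):

```c
#include "pico/lock_core.h"

typedef struct {
    lock_core_t core;
    bool signalled;
} event_t;

static void event_init(event_t *evt, uint lock_num) {
    lock_init(&evt->core, lock_num);
    evt->signalled = false;
}

static void event_wait(event_t *evt) {
    do {
        uint32_t save = spin_lock_blocking(evt->core.spin_lock);
        if (evt->signalled) {
            evt->signalled = false;
            spin_unlock(evt->core.spin_lock, save);
            return;
        }
        // atomically release the spin lock and block until notified
        lock_internal_spin_unlock_with_wait(&evt->core, save);
    } while (true);
}

static void event_set(event_t *evt) {
    uint32_t save = spin_lock_blocking(evt->core.spin_lock);
    evt->signalled = true;
    // atomically release the spin lock and wake any waiter
    lock_internal_spin_unlock_with_notify(&evt->core, save);
}
```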

@@ -0,0 +1,313 @@
/*
* Copyright (c) 2020 Raspberry Pi (Trading) Ltd.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#ifndef _PICO_MUTEX_H
#define _PICO_MUTEX_H
#include "pico/lock_core.h"
#ifdef __cplusplus
extern "C" {
#endif
/** \file mutex.h
* \defgroup mutex mutex
* \ingroup pico_sync
* \brief Mutex API for non IRQ mutual exclusion between cores
*
* Mutexes are application level locks usually used protecting data structures that might be used by
* multiple threads of execution. Unlike critical sections, the mutex protected code is not necessarily
* required/expected to complete quickly, as no other system wide locks are held on account of an acquired mutex.
*
* When acquired, the mutex has an owner (see \ref lock_get_caller_owner_id) which with the plain SDK is just
* the acquiring core, but in an RTOS it could be a task, or an IRQ handler context.
*
* Two variants of mutex are provided; \ref mutex_t (and associated mutex_ functions) is a regular mutex that cannot
* be acquired recursively by the same owner (a deadlock will occur if you try). \ref recursive_mutex_t
* (and associated recursive_mutex_ functions) is a recursive mutex that can be recursively obtained by
* the same caller, at the expense of some more overhead when acquiring and releasing.
*
* It is generally a bad idea to call blocking mutex_ or recursive_mutex_ functions from within an IRQ handler.
* It is valid to call \ref mutex_try_enter or \ref recursive_mutex_try_enter from within an IRQ handler, if the operation
* that would be conducted under lock can be skipped if the mutex is locked (at least by the same owner).
*
* NOTE: For backwards compatibility with version 1.2.0 of the SDK, if the define
 * PICO_MUTEX_ENABLE_SDK120_COMPATIBILITY is set to 1, then the regular mutex_ functions
* may also be used for recursive mutexes. This flag will be removed in a future version of the SDK.
*
* See \ref critical_section.h for protecting access between multiple cores AND IRQ handlers
*/
/*! \brief recursive mutex instance
* \ingroup mutex
*/
typedef struct {
lock_core_t core;
lock_owner_id_t owner; //! owner id LOCK_INVALID_OWNER_ID for unowned
uint8_t enter_count; //! ownership count
#if PICO_MUTEX_ENABLE_SDK120_COMPATIBILITY
bool recursive;
#endif
} recursive_mutex_t;
/*! \brief regular (non recursive) mutex instance
* \ingroup mutex
*/
#if !PICO_MUTEX_ENABLE_SDK120_COMPATIBILITY
typedef struct mutex {
lock_core_t core;
lock_owner_id_t owner; //! owner id LOCK_INVALID_OWNER_ID for unowned
} mutex_t;
#else
typedef recursive_mutex_t mutex_t; // they are one and the same when backwards compatible with SDK1.2.0
#endif
/*! \brief Initialise a mutex structure
* \ingroup mutex
*
* \param mtx Pointer to mutex structure
*/
void mutex_init(mutex_t *mtx);
/*! \brief Initialise a recursive mutex structure
* \ingroup mutex
*
* A recursive mutex may be entered in a nested fashion by the same owner
*
* \param mtx Pointer to recursive mutex structure
*/
void recursive_mutex_init(recursive_mutex_t *mtx);
/*! \brief Take ownership of a mutex
* \ingroup mutex
*
* This function will block until the caller can be granted ownership of the mutex.
* On return the caller owns the mutex
*
* \param mtx Pointer to mutex structure
*/
void mutex_enter_blocking(mutex_t *mtx);
/*! \brief Take ownership of a recursive mutex
* \ingroup mutex
*
* This function will block until the caller can be granted ownership of the mutex.
* On return the caller owns the mutex
*
* \param mtx Pointer to recursive mutex structure
*/
void recursive_mutex_enter_blocking(recursive_mutex_t *mtx);
/*! \brief Attempt to take ownership of a mutex
* \ingroup mutex
*
* If the mutex wasn't owned, this will claim the mutex for the caller and return true.
* Otherwise (if the mutex was already owned) this will return false and the
* caller will NOT own the mutex.
*
* \param mtx Pointer to mutex structure
* \param owner_out If mutex was already owned, and this pointer is non-zero, it will be filled in with the owner id of the current owner of the mutex
* \return true if mutex now owned, false otherwise
*/
bool mutex_try_enter(mutex_t *mtx, uint32_t *owner_out);
/*! \brief Attempt to take ownership of a mutex until the specified time
* \ingroup mutex
*
* If the mutex wasn't owned, this method will immediately claim the mutex for the caller and return true.
 * If the mutex is owned by the caller, this method will immediately return false.
* If the mutex is owned by someone else, this method will try to claim it until the specified time, returning
* true if it succeeds, or false on timeout
*
* \param mtx Pointer to mutex structure
* \param until The time after which to return if the caller cannot be granted ownership of the mutex
* \return true if mutex now owned, false otherwise
*/
bool mutex_try_enter_block_until(mutex_t *mtx, absolute_time_t until);
/*! \brief Attempt to take ownership of a recursive mutex
* \ingroup mutex
*
* If the mutex wasn't owned or was owned by the caller, this will claim the mutex and return true.
* Otherwise (if the mutex was already owned by another owner) this will return false and the
* caller will NOT own the mutex.
*
* \param mtx Pointer to recursive mutex structure
* \param owner_out If mutex was already owned by another owner, and this pointer is non-zero,
* it will be filled in with the owner id of the current owner of the mutex
* \return true if the recursive mutex (now) owned, false otherwise
*/
bool recursive_mutex_try_enter(recursive_mutex_t *mtx, uint32_t *owner_out);
/*! \brief Wait for mutex with timeout
* \ingroup mutex
*
 * Wait for up to the specified time to take ownership of the mutex. If the caller
* can be granted ownership of the mutex before the timeout expires, then true will be returned
* and the caller will own the mutex, otherwise false will be returned and the caller will NOT own the mutex.
*
* \param mtx Pointer to mutex structure
* \param timeout_ms The timeout in milliseconds.
* \return true if mutex now owned, false if timeout occurred before ownership could be granted
*/
bool mutex_enter_timeout_ms(mutex_t *mtx, uint32_t timeout_ms);
/*! \brief Wait for recursive mutex with timeout
* \ingroup mutex
*
 * Wait for up to the specified time to take ownership of the recursive mutex. If the caller
* already has ownership of the mutex or can be granted ownership of the mutex before the timeout expires,
* then true will be returned and the caller will own the mutex, otherwise false will be returned and the caller
* will NOT own the mutex.
*
* \param mtx Pointer to recursive mutex structure
* \param timeout_ms The timeout in milliseconds.
* \return true if the recursive mutex (now) owned, false if timeout occurred before ownership could be granted
*/
bool recursive_mutex_enter_timeout_ms(recursive_mutex_t *mtx, uint32_t timeout_ms);
/*! \brief Wait for mutex with timeout
* \ingroup mutex
*
 * Wait for up to the specified time to take ownership of the mutex. If the caller
* can be granted ownership of the mutex before the timeout expires, then true will be returned
* and the caller will own the mutex, otherwise false will be returned and the caller
* will NOT own the mutex.
*
* \param mtx Pointer to mutex structure
* \param timeout_us The timeout in microseconds.
* \return true if mutex now owned, false if timeout occurred before ownership could be granted
*/
bool mutex_enter_timeout_us(mutex_t *mtx, uint32_t timeout_us);
/*! \brief Wait for recursive mutex with timeout
* \ingroup mutex
*
 * Wait for up to the specified time to take ownership of the recursive mutex. If the caller
* already has ownership of the mutex or can be granted ownership of the mutex before the timeout expires,
* then true will be returned and the caller will own the mutex, otherwise false will be returned and the caller
* will NOT own the mutex.
*
 * \param mtx Pointer to recursive mutex structure
* \param timeout_us The timeout in microseconds.
* \return true if the recursive mutex (now) owned, false if timeout occurred before ownership could be granted
*/
bool recursive_mutex_enter_timeout_us(recursive_mutex_t *mtx, uint32_t timeout_us);
/*! \brief Wait for mutex until a specific time
* \ingroup mutex
*
 * Wait until the specified time to take ownership of the mutex. If the caller
* can be granted ownership of the mutex before the timeout expires, then true will be returned
* and the caller will own the mutex, otherwise false will be returned and the caller
* will NOT own the mutex.
*
* \param mtx Pointer to mutex structure
* \param until The time after which to return if the caller cannot be granted ownership of the mutex
* \return true if mutex now owned, false if timeout occurred before ownership could be granted
*/
bool mutex_enter_block_until(mutex_t *mtx, absolute_time_t until);
/*! \brief Wait for recursive mutex until a specific time
* \ingroup mutex
*
 * Wait until the specified time to take ownership of the recursive mutex. If the caller
* already has ownership of the mutex or can be granted ownership of the mutex before the timeout expires,
* then true will be returned and the caller will own the mutex, otherwise false will be returned and the caller
* will NOT own the mutex.
*
* \param mtx Pointer to recursive mutex structure
* \param until The time after which to return if the caller cannot be granted ownership of the mutex
* \return true if the recursive mutex (now) owned, false if timeout occurred before ownership could be granted
*/
bool recursive_mutex_enter_block_until(recursive_mutex_t *mtx, absolute_time_t until);
/*! \brief Release ownership of a mutex
* \ingroup mutex
*
* \param mtx Pointer to mutex structure
*/
void mutex_exit(mutex_t *mtx);
/*! \brief Release ownership of a recursive mutex
* \ingroup mutex
*
* \param mtx Pointer to recursive mutex structure
*/
void recursive_mutex_exit(recursive_mutex_t *mtx);
/*! \brief Test for mutex initialized state
* \ingroup mutex
*
* \param mtx Pointer to mutex structure
* \return true if the mutex is initialized, false otherwise
*/
static inline bool mutex_is_initialized(mutex_t *mtx) {
return mtx->core.spin_lock != 0;
}
/*! \brief Test for recursive mutex initialized state
* \ingroup mutex
*
* \param mtx Pointer to recursive mutex structure
* \return true if the recursive mutex is initialized, false otherwise
*/
static inline bool recursive_mutex_is_initialized(recursive_mutex_t *mtx) {
return mtx->core.spin_lock != 0;
}
/*! \brief Helper macro for static definition of mutexes
* \ingroup mutex
*
* A mutex defined as follows:
*
* ```c
* auto_init_mutex(my_mutex);
* ```
*
* Is equivalent to doing
*
* ```c
* static mutex_t my_mutex;
*
* void my_init_function() {
* mutex_init(&my_mutex);
* }
* ```
*
* But the initialization of the mutex is performed automatically during runtime initialization
*/
#define auto_init_mutex(name) static __attribute__((section(".mutex_array"))) mutex_t name
/*! \brief Helper macro for static definition of recursive mutexes
* \ingroup mutex
*
* A recursive mutex defined as follows:
*
* ```c
* auto_init_recursive_mutex(my_recursive_mutex);
* ```
*
* Is equivalent to doing
*
* ```c
* static recursive_mutex_t my_recursive_mutex;
*
* void my_init_function() {
* recursive_mutex_init(&my_recursive_mutex);
* }
* ```
*
* But the initialization of the mutex is performed automatically during runtime initialization
*/
#define auto_init_recursive_mutex(name) static __attribute__((section(".mutex_array"))) recursive_mutex_t name = { .core = { .spin_lock = (spin_lock_t *)1 /* marker for runtime_init */ }, .owner = 0, .enter_count = 0 }
void runtime_init_mutex(void);
#ifdef __cplusplus
}
#endif
#endif
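
A short usage sketch of the mutex API above (not part of this diff; `config_mutex` and the surrounding functions are illustrative):

```c
#include "pico/mutex.h"

// statically defined mutex, initialized automatically during runtime init via the .mutex_array section
auto_init_mutex(config_mutex);

static int shared_config_value;  // hypothetical shared state

void update_config(int v) {
    mutex_enter_blocking(&config_mutex);
    shared_config_value = v;
    mutex_exit(&config_mutex);
}

bool try_update_config(int v) {
    // give up after 5 ms instead of blocking indefinitely
    if (!mutex_enter_timeout_ms(&config_mutex, 5)) return false;
    shared_config_value = v;
    mutex_exit(&config_mutex);
    return true;
}
```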

@@ -0,0 +1,139 @@
/*
* Copyright (c) 2020 Raspberry Pi (Trading) Ltd.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#ifndef _PICO_SEM_H
#define _PICO_SEM_H
#include "pico/lock_core.h"
/** \file sem.h
* \defgroup sem sem
* \ingroup pico_sync
* \brief Semaphore API for restricting access to a resource
*
* A semaphore holds a number of available permits. `sem_acquire` methods will acquire a permit if available
* (reducing the available count by 1) or block if the number of available permits is 0.
* \ref sem_release() increases the number of available permits by one potentially unblocking a `sem_acquire` method.
*
* Note that \ref sem_release() may be called an arbitrary number of times, however the number of available
* permits is capped to the max_permit value specified during semaphore initialization.
*
* Although these semaphore related functions can be used from IRQ handlers, it is obviously preferable to only
* release semaphores from within an IRQ handler (i.e. avoid blocking)
*/
#ifdef __cplusplus
extern "C" {
#endif
typedef struct semaphore {
struct lock_core core;
int16_t permits;
int16_t max_permits;
} semaphore_t;
/*! \brief Initialise a semaphore structure
* \ingroup sem
*
* \param sem Pointer to semaphore structure
 * \param initial_permits How many permits are initially available
* \param max_permits Total number of permits allowed for this semaphore
*/
void sem_init(semaphore_t *sem, int16_t initial_permits, int16_t max_permits);
/*! \brief Return number of available permits on the semaphore
* \ingroup sem
*
* \param sem Pointer to semaphore structure
* \return The number of permits available on the semaphore.
*/
int sem_available(semaphore_t *sem);
/*! \brief Release a permit on a semaphore
* \ingroup sem
*
* Increases the number of permits by one (unless the number of permits is already at the maximum).
* A blocked `sem_acquire` will be released if the number of permits is increased.
*
* \param sem Pointer to semaphore structure
* \return true if the number of permits available was increased.
*/
bool sem_release(semaphore_t *sem);
/*! \brief Reset semaphore to a specific number of available permits
* \ingroup sem
*
* Reset value should be from 0 to the max_permits specified in the init function
*
* \param sem Pointer to semaphore structure
* \param permits the new number of available permits
*/
void sem_reset(semaphore_t *sem, int16_t permits);
/*! \brief Acquire a permit from the semaphore
* \ingroup sem
*
* This function will block and wait if no permits are available.
*
* \param sem Pointer to semaphore structure
*/
void sem_acquire_blocking(semaphore_t *sem);
/*! \brief Acquire a permit from a semaphore, with timeout
* \ingroup sem
*
* This function will block and wait if no permits are available, until the
* defined timeout has been reached. If the timeout is reached the function will
* return false, otherwise it will return true.
*
* \param sem Pointer to semaphore structure
* \param timeout_ms Time to wait to acquire the semaphore, in milliseconds.
* \return false if timeout reached, true if permit was acquired.
*/
bool sem_acquire_timeout_ms(semaphore_t *sem, uint32_t timeout_ms);
/*! \brief Acquire a permit from a semaphore, with timeout
* \ingroup sem
*
* This function will block and wait if no permits are available, until the
* defined timeout has been reached. If the timeout is reached the function will
* return false, otherwise it will return true.
*
* \param sem Pointer to semaphore structure
* \param timeout_us Time to wait to acquire the semaphore, in microseconds.
* \return false if timeout reached, true if permit was acquired.
*/
bool sem_acquire_timeout_us(semaphore_t *sem, uint32_t timeout_us);
/*! \brief Wait to acquire a permit from a semaphore until a specific time
* \ingroup sem
*
* This function will block and wait if no permits are available, until the
* specified timeout time. If the timeout is reached the function will
* return false, otherwise it will return true.
*
* \param sem Pointer to semaphore structure
* \param until The time after which to return if the sem is not available.
* \return true if permit was acquired, false if the until time was reached before
* acquiring.
*/
bool sem_acquire_block_until(semaphore_t *sem, absolute_time_t until);
/*! \brief Attempt to acquire a permit from a semaphore without blocking
* \ingroup sem
*
* This function will return false without blocking if no permits are
* available, otherwise it will acquire a permit and return true.
*
* \param sem Pointer to semaphore structure
* \return true if permit was acquired.
*/
bool sem_try_acquire(semaphore_t *sem);
#ifdef __cplusplus
}
#endif
#endif
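
A minimal sketch of the semaphore API above used as a "data ready" signal (not part of this diff; all names below are illustrative):

```c
#include "pico/sem.h"

static semaphore_t data_ready_sem;

void signalling_init(void) {
    // start with 0 permits available, capped at 1
    sem_init(&data_ready_sem, 0, 1);
}

// e.g. called from an IRQ handler: releasing (rather than acquiring) in IRQ context is fine
void on_data_ready(void) {
    sem_release(&data_ready_sem);
}

void consumer_loop(void) {
    for (;;) {
        // wait up to 100 ms for the producer to signal
        if (sem_acquire_timeout_ms(&data_ready_sem, 100)) {
            // ... process the data ...
        } else {
            // ... timed out; do other housekeeping ...
        }
    }
}
```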

@@ -0,0 +1,19 @@
/*
* Copyright (c) 2020 Raspberry Pi (Trading) Ltd.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#ifndef _PICO_SYNC_H
#define _PICO_SYNC_H
/** \file pico/sync.h
* \defgroup pico_sync pico_sync
* \brief Synchronization primitives and mutual exclusion
*/
#include "pico/sem.h"
#include "pico/mutex.h"
#include "pico/critical_section.h"
#endif

@@ -0,0 +1,13 @@
/*
* Copyright (c) 2020 Raspberry Pi (Trading) Ltd.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#include "pico/lock_core.h"
void lock_init(lock_core_t *core, uint lock_num) {
valid_params_if(LOCK_CORE, lock_num < NUM_SPIN_LOCKS);
core->spin_lock = spin_lock_instance(lock_num);
}

@@ -0,0 +1,228 @@
/*
* Copyright (c) 2020 Raspberry Pi (Trading) Ltd.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#include "pico/mutex.h"
#include "pico/time.h"
#include "pico/runtime_init.h"
#if !PICO_RUNTIME_NO_INIT_MUTEX
void __weak runtime_init_mutex(void) {
// this is an array of either mutex_t or recursive_mutex_t (i.e. not necessarily the same size)
    // however each starts with a lock_core_t, and the spin_lock is initialized to address 1 for a recursive
    // mutex and 0 for a regular one.
static_assert(!(sizeof(mutex_t)&3), "");
static_assert(!(sizeof(recursive_mutex_t)&3), "");
static_assert(!offsetof(mutex_t, core), "");
static_assert(!offsetof(recursive_mutex_t, core), "");
extern lock_core_t __mutex_array_start;
extern lock_core_t __mutex_array_end;
for (lock_core_t *l = &__mutex_array_start; l < &__mutex_array_end; ) {
if (l->spin_lock) {
assert(1 == (uintptr_t)l->spin_lock); // indicator for a recursive mutex
recursive_mutex_t *rm = (recursive_mutex_t *)l;
recursive_mutex_init(rm);
l = &rm[1].core; // next
} else {
mutex_t *m = (mutex_t *)l;
mutex_init(m);
l = &m[1].core; // next
}
}
}
#endif
#if defined(PICO_RUNTIME_INIT_MUTEX) && !PICO_RUNTIME_SKIP_INIT_MUTEX
PICO_RUNTIME_INIT_FUNC_RUNTIME(runtime_init_mutex, PICO_RUNTIME_INIT_MUTEX);
#endif
void mutex_init(mutex_t *mtx) {
lock_init(&mtx->core, next_striped_spin_lock_num());
mtx->owner = LOCK_INVALID_OWNER_ID;
#if PICO_MUTEX_ENABLE_SDK120_COMPATIBILITY
mtx->recursive = false;
#endif
__mem_fence_release();
}
void recursive_mutex_init(recursive_mutex_t *mtx) {
lock_init(&mtx->core, next_striped_spin_lock_num());
mtx->owner = LOCK_INVALID_OWNER_ID;
mtx->enter_count = 0;
#if PICO_MUTEX_ENABLE_SDK120_COMPATIBILITY
mtx->recursive = true;
#endif
__mem_fence_release();
}
void __time_critical_func(mutex_enter_blocking)(mutex_t *mtx) {
#if PICO_MUTEX_ENABLE_SDK120_COMPATIBILITY
if (mtx->recursive) {
recursive_mutex_enter_blocking(mtx);
return;
}
#endif
lock_owner_id_t caller = lock_get_caller_owner_id();
do {
uint32_t save = spin_lock_blocking(mtx->core.spin_lock);
if (!lock_is_owner_id_valid(mtx->owner)) {
mtx->owner = caller;
spin_unlock(mtx->core.spin_lock, save);
break;
}
lock_internal_spin_unlock_with_wait(&mtx->core, save);
} while (true);
}
void __time_critical_func(recursive_mutex_enter_blocking)(recursive_mutex_t *mtx) {
lock_owner_id_t caller = lock_get_caller_owner_id();
do {
uint32_t save = spin_lock_blocking(mtx->core.spin_lock);
if (mtx->owner == caller || !lock_is_owner_id_valid(mtx->owner)) {
mtx->owner = caller;
uint __unused total = ++mtx->enter_count;
spin_unlock(mtx->core.spin_lock, save);
assert(total); // check for overflow
return;
} else {
lock_internal_spin_unlock_with_wait(&mtx->core, save);
}
} while (true);
}
bool __time_critical_func(mutex_try_enter)(mutex_t *mtx, uint32_t *owner_out) {
#if PICO_MUTEX_ENABLE_SDK120_COMPATIBILITY
if (mtx->recursive) {
return recursive_mutex_try_enter(mtx, owner_out);
}
#endif
bool entered;
uint32_t save = spin_lock_blocking(mtx->core.spin_lock);
if (!lock_is_owner_id_valid(mtx->owner)) {
mtx->owner = lock_get_caller_owner_id();
entered = true;
} else {
if (owner_out) *owner_out = (uint32_t) mtx->owner;
entered = false;
}
spin_unlock(mtx->core.spin_lock, save);
return entered;
}
bool __time_critical_func(mutex_try_enter_block_until)(mutex_t *mtx, absolute_time_t until) {
// not using lock_owner_id_t to avoid backwards incompatibility change to mutex_try_enter API
static_assert(sizeof(lock_owner_id_t) <= 4, "");
uint32_t owner;
if (!mutex_try_enter(mtx, &owner)) {
if ((lock_owner_id_t)owner == lock_get_caller_owner_id()) return false; // deadlock, so we can never own it
return mutex_enter_block_until(mtx, until);
}
return true;
}
bool __time_critical_func(recursive_mutex_try_enter)(recursive_mutex_t *mtx, uint32_t *owner_out) {
bool entered;
lock_owner_id_t caller = lock_get_caller_owner_id();
uint32_t save = spin_lock_blocking(mtx->core.spin_lock);
if (!lock_is_owner_id_valid(mtx->owner) || mtx->owner == caller) {
mtx->owner = caller;
uint __unused total = ++mtx->enter_count;
assert(total); // check for overflow
entered = true;
} else {
if (owner_out) *owner_out = (uint32_t) mtx->owner;
entered = false;
}
spin_unlock(mtx->core.spin_lock, save);
return entered;
}
bool __time_critical_func(mutex_enter_timeout_ms)(mutex_t *mtx, uint32_t timeout_ms) {
return mutex_enter_block_until(mtx, make_timeout_time_ms(timeout_ms));
}
bool __time_critical_func(recursive_mutex_enter_timeout_ms)(recursive_mutex_t *mtx, uint32_t timeout_ms) {
return recursive_mutex_enter_block_until(mtx, make_timeout_time_ms(timeout_ms));
}
bool __time_critical_func(mutex_enter_timeout_us)(mutex_t *mtx, uint32_t timeout_us) {
return mutex_enter_block_until(mtx, make_timeout_time_us(timeout_us));
}
bool __time_critical_func(recursive_mutex_enter_timeout_us)(recursive_mutex_t *mtx, uint32_t timeout_us) {
return recursive_mutex_enter_block_until(mtx, make_timeout_time_us(timeout_us));
}
bool __time_critical_func(mutex_enter_block_until)(mutex_t *mtx, absolute_time_t until) {
#if PICO_MUTEX_ENABLE_SDK120_COMPATIBILITY
if (mtx->recursive) {
return recursive_mutex_enter_block_until(mtx, until);
}
#endif
assert(mtx->core.spin_lock);
lock_owner_id_t caller = lock_get_caller_owner_id();
do {
uint32_t save = spin_lock_blocking(mtx->core.spin_lock);
if (!lock_is_owner_id_valid(mtx->owner)) {
mtx->owner = caller;
spin_unlock(mtx->core.spin_lock, save);
return true;
} else {
if (lock_internal_spin_unlock_with_best_effort_wait_or_timeout(&mtx->core, save, until)) {
// timed out
return false;
}
// not timed out; spin lock already unlocked, so loop again
}
} while (true);
}
bool __time_critical_func(recursive_mutex_enter_block_until)(recursive_mutex_t *mtx, absolute_time_t until) {
assert(mtx->core.spin_lock);
lock_owner_id_t caller = lock_get_caller_owner_id();
do {
uint32_t save = spin_lock_blocking(mtx->core.spin_lock);
if (!lock_is_owner_id_valid(mtx->owner) || mtx->owner == caller) {
mtx->owner = caller;
uint __unused total = ++mtx->enter_count;
spin_unlock(mtx->core.spin_lock, save);
assert(total); // check for overflow
return true;
} else {
if (lock_internal_spin_unlock_with_best_effort_wait_or_timeout(&mtx->core, save, until)) {
// timed out
return false;
}
// not timed out; spin lock already unlocked, so loop again
}
} while (true);
}
void __time_critical_func(mutex_exit)(mutex_t *mtx) {
#if PICO_MUTEX_ENABLE_SDK120_COMPATIBILITY
if (mtx->recursive) {
recursive_mutex_exit(mtx);
return;
}
#endif
uint32_t save = spin_lock_blocking(mtx->core.spin_lock);
assert(lock_is_owner_id_valid(mtx->owner));
mtx->owner = LOCK_INVALID_OWNER_ID;
lock_internal_spin_unlock_with_notify(&mtx->core, save);
}
void __time_critical_func(recursive_mutex_exit)(recursive_mutex_t *mtx) {
uint32_t save = spin_lock_blocking(mtx->core.spin_lock);
assert(lock_is_owner_id_valid(mtx->owner));
assert(mtx->enter_count);
if (!--mtx->enter_count) {
mtx->owner = LOCK_INVALID_OWNER_ID;
lock_internal_spin_unlock_with_notify(&mtx->core, save);
} else {
spin_unlock(mtx->core.spin_lock, save);
}
}

@@ -0,0 +1,95 @@
/*
* Copyright (c) 2020 Raspberry Pi (Trading) Ltd.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#include "pico/sem.h"
#include "pico/time.h"
void sem_init(semaphore_t *sem, int16_t initial_permits, int16_t max_permits) {
lock_init(&sem->core, next_striped_spin_lock_num());
sem->permits = initial_permits;
sem->max_permits = max_permits;
__mem_fence_release();
}
int __time_critical_func(sem_available)(semaphore_t *sem) {
#ifdef __GNUC__
return *(volatile typeof(sem->permits) *) &sem->permits;
#else
static_assert(sizeof(sem->permits) == 2, "");
return *(volatile int16_t *) &sem->permits;
#endif
}
void __time_critical_func(sem_acquire_blocking)(semaphore_t *sem) {
do {
uint32_t save = spin_lock_blocking(sem->core.spin_lock);
if (sem->permits > 0) {
sem->permits--;
spin_unlock(sem->core.spin_lock, save);
break;
}
lock_internal_spin_unlock_with_wait(&sem->core, save);
} while (true);
}
bool __time_critical_func(sem_acquire_timeout_ms)(semaphore_t *sem, uint32_t timeout_ms) {
return sem_acquire_block_until(sem, make_timeout_time_ms(timeout_ms));
}
bool __time_critical_func(sem_acquire_timeout_us)(semaphore_t *sem, uint32_t timeout_us) {
return sem_acquire_block_until(sem, make_timeout_time_us(timeout_us));
}
bool __time_critical_func(sem_acquire_block_until)(semaphore_t *sem, absolute_time_t until) {
do {
uint32_t save = spin_lock_blocking(sem->core.spin_lock);
if (sem->permits > 0) {
sem->permits--;
spin_unlock(sem->core.spin_lock, save);
return true;
}
if (lock_internal_spin_unlock_with_best_effort_wait_or_timeout(&sem->core, save, until)) {
return false;
}
} while (true);
}
bool __time_critical_func(sem_try_acquire)(semaphore_t *sem) {
uint32_t save = spin_lock_blocking(sem->core.spin_lock);
if (sem->permits > 0) {
sem->permits--;
spin_unlock(sem->core.spin_lock, save);
return true;
}
spin_unlock(sem->core.spin_lock, save);
return false;
}
// todo this should really have a blocking variant for when permits are maxed out
bool __time_critical_func(sem_release)(semaphore_t *sem) {
uint32_t save = spin_lock_blocking(sem->core.spin_lock);
int32_t count = sem->permits;
if (count < sem->max_permits) {
sem->permits = (int16_t)(count + 1);
lock_internal_spin_unlock_with_notify(&sem->core, save);
return true;
} else {
spin_unlock(sem->core.spin_lock, save);
return false;
}
}
void __time_critical_func(sem_reset)(semaphore_t *sem, int16_t permits) {
assert(permits >= 0 && permits <= sem->max_permits);
uint32_t save = spin_lock_blocking(sem->core.spin_lock);
if (permits > sem->permits) {
sem->permits = permits;
lock_internal_spin_unlock_with_notify(&sem->core, save);
} else {
sem->permits = permits;
spin_unlock(sem->core.spin_lock, save);
}
}