pub use bare_metal::CriticalSection;

use core::{
    cell::Cell,
    sync::atomic::{AtomicBool, Ordering},
};

pub use cortex_m::{
    asm::nop,
    asm::wfi,
    interrupt,
    peripheral::{scb::SystemHandler, DWT, NVIC, SCB, SYST},
    Peripherals,
};

pub mod executor {
    use core::{
        future::Future,
        mem,
        pin::Pin,
        task::{Context, Poll, RawWaker, RawWakerVTable, Waker},
    };

    static WAKER_VTABLE: RawWakerVTable =
        RawWakerVTable::new(waker_clone, waker_wake, waker_wake, waker_drop);

    unsafe fn waker_clone(p: *const ()) -> RawWaker {
        RawWaker::new(p, &WAKER_VTABLE)
    }

    unsafe fn waker_wake(p: *const ()) {
        // The only thing we need from a waker is the function to call to pend the async
        // dispatcher.
        let f: fn() = mem::transmute(p);
        f();
    }

    unsafe fn waker_drop(_: *const ()) {
        // nop
    }

    //============
    // AsyncTaskExecutor

    pub struct AsyncTaskExecutor<F: Future + 'static> {
        task: Option<F>,
    }

    impl<F: Future + 'static> AsyncTaskExecutor<F> {
        pub const fn new() -> Self {
            Self { task: None }
        }

        pub fn is_running(&self) -> bool {
            self.task.is_some()
        }

        pub fn spawn(&mut self, future: F) {
            self.task = Some(future);
        }

        pub fn poll(&mut self, wake: fn()) -> bool {
            if let Some(future) = &mut self.task {
                unsafe {
                    let waker = Waker::from_raw(RawWaker::new(wake as *const (), &WAKER_VTABLE));
                    let mut cx = Context::from_waker(&waker);
                    let future = Pin::new_unchecked(future);

                    match future.poll(&mut cx) {
                        Poll::Ready(_) => {
                            self.task = None;
                            true // Only true if we finished now
                        }
                        Poll::Pending => false,
                    }
                }
            } else {
                false
            }
        }
    }
}
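
// Illustrative usage sketch of `AsyncTaskExecutor` (not part of this crate): in generated
// code the executor lives in a static and is polled from an async dispatcher interrupt.
// The names `exec`, `pend_dispatcher` and the task body below are hypothetical.
//
//     fn pend_dispatcher() {
//         // would pend the dispatcher interrupt, e.g. via `NVIC::pend(...)`
//     }
//
//     let mut exec = executor::AsyncTaskExecutor::new();
//     exec.spawn(async { /* task body */ });
//
//     // Poll from the dispatcher; `poll` returns `true` only when the future completes.
//     let finished = exec.poll(pend_dispatcher);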

/// Mask is used to store interrupt masks on systems without a BASEPRI register (M0, M0+, M23).
/// It needs to be large enough to cover all the relevant interrupts in use.
/// For M0/M0+ there are only 32 interrupts so we only need one u32 value.
/// For M23 there can be as many as 480 interrupts.
/// Rather than providing space for all possible interrupts, we just detect the highest interrupt
/// in use at compile time and allocate enough u32 chunks to cover them.
#[derive(Copy, Clone)]
pub struct Mask<const M: usize>([u32; M]);

impl<const M: usize> core::ops::BitOrAssign for Mask<M> {
    fn bitor_assign(&mut self, rhs: Self) {
        for i in 0..M {
            self.0[i] |= rhs.0[i];
        }
    }
}

#[cfg(not(have_basepri))]
impl<const M: usize> Mask<M> {
    /// Set a bit inside a Mask.
    const fn set_bit(mut self, bit: u32) -> Self {
        let block = bit / 32;

        if block as usize >= M {
            panic!("Generating masks for thumbv6/thumbv8m.base failed! Are you compiling for thumbv6 on a thumbv7 MCU or using an unsupported thumbv8m.base MCU?");
        }

        let offset = bit - (block * 32);
        self.0[block as usize] |= 1 << offset;
        self
    }
}
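
// Worked example of `set_bit` (illustrative only): with `M = 2`, setting bit 33 selects
// block 33 / 32 = 1 and offset 33 - 32 = 1, so only bit 1 of the second chunk is set:
//
//     let mask = Mask([0u32; 2]).set_bit(33);
//     // mask.0 == [0, 0b10]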

#[cfg(have_basepri)]
use cortex_m::register::basepri;

#[cfg(have_basepri)]
#[inline(always)]
pub fn run<F>(priority: u8, f: F)
where
    F: FnOnce(),
{
    if priority == 1 {
        // If the priority of this interrupt is `1` then BASEPRI can only be `0`
        f();
        unsafe { basepri::write(0) }
    } else {
        let initial = basepri::read();
        f();
        unsafe { basepri::write(initial) }
    }
}

#[cfg(not(have_basepri))]
#[inline(always)]
pub fn run<F>(_priority: u8, f: F)
where
    F: FnOnce(),
{
    f();
}

/// One-shot barrier: `wait` spins until another context calls `release`.
pub struct Barrier {
    inner: AtomicBool,
}

impl Barrier {
    pub const fn new() -> Self {
        Barrier {
            inner: AtomicBool::new(false),
        }
    }

    /// Open the barrier, releasing all waiters.
    pub fn release(&self) {
        self.inner.store(true, Ordering::Release);
    }

    /// Spin until the barrier is released.
    pub fn wait(&self) {
        while !self.inner.load(Ordering::Acquire) {
            core::hint::spin_loop()
        }
    }
}

// Newtype over `Cell` that forbids mutation through a shared reference
pub struct Priority {
    inner: Cell<u8>,
}

impl Priority {
    /// Create a new Priority
    ///
    /// # Safety
    ///
    /// Will overwrite the current Priority
    #[inline(always)]
    pub const unsafe fn new(value: u8) -> Self {
        Priority {
            inner: Cell::new(value),
        }
    }

    /// Change the current priority to `value`
    // These two methods are used by `lock` (see below) but can't be used from the RTIC application
    #[inline(always)]
    fn set(&self, value: u8) {
        self.inner.set(value);
    }

    /// Get the current priority
    #[inline(always)]
    fn get(&self) -> u8 {
        self.inner.get()
    }
}

/// Const helper to check architecture
pub const fn have_basepri() -> bool {
    #[cfg(have_basepri)]
    {
        true
    }

    #[cfg(not(have_basepri))]
    {
        false
    }
}

#[inline(always)]
pub fn assert_send<T>()
where
    T: Send,
{
}

#[inline(always)]
pub fn assert_sync<T>()
where
    T: Sync,
{
}

/// Lock implementation using BASEPRI and global Critical Section (CS)
///
/// # Safety
///
/// The system ceiling is raised from current to ceiling by either
/// - raising BASEPRI to the ceiling value, or
/// - disabling all interrupts in case we want to
///   mask interrupts with maximum priority
///
/// The raw pointer is only dereferenced inside the CS.
///
/// The priority.set/priority.get can safely be outside the CS
/// as being a context local cell (not affected by preemptions).
/// It is merely used in order to omit masking in case the
/// current priority >= ceiling.
///
/// Lock Efficiency:
/// Experiments validate (sub)-zero cost for the CS implementation,
/// (sub)-zero as in:
/// - Either zero OH (lock optimized out), or
/// - Amounting to an optimal assembly implementation
///   - The BASEPRI value is folded to a constant at compile time
///   - CS entry, single assembly instruction to write BASEPRI
///   - CS exit, single assembly instruction to write BASEPRI
///   - priority.set/get optimized out (their effect not)
/// - On par or better than any handwritten implementation of SRP
///
/// Limitations:
/// The current implementation reads/writes BASEPRI once
/// even in some edge cases where this may be omitted.
/// Total OH per task is max 2 clock cycles, negligible in practice
/// but can in theory be fixed.
#[cfg(have_basepri)]
#[inline(always)]
pub unsafe fn lock<T, R, const M: usize>(
    ptr: *mut T,
    priority: &Priority,
    ceiling: u8,
    nvic_prio_bits: u8,
    _mask: &[Mask<M>; 3],
    f: impl FnOnce(&mut T) -> R,
) -> R {
    let current = priority.get();

    if current < ceiling {
        if ceiling == (1 << nvic_prio_bits) {
            priority.set(u8::MAX);
            let r = interrupt::free(|_| f(&mut *ptr));
            priority.set(current);
            r
        } else {
            priority.set(ceiling);
            basepri::write(logical2hw(ceiling, nvic_prio_bits));
            let r = f(&mut *ptr);
            basepri::write(logical2hw(current, nvic_prio_bits));
            priority.set(current);
            r
        }
    } else {
        f(&mut *ptr)
    }
}
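
// Illustrative call site (hypothetical names; in practice this call is emitted by the RTIC
// code generator rather than written by hand). It raises the system ceiling to 2 while the
// closure runs, assuming `SHARED`, `priority`, `masks` and `NVIC_PRIO_BITS` are in scope:
//
//     let sum = unsafe {
//         lock(
//             core::ptr::addr_of_mut!(SHARED), // *mut T to the shared resource
//             &priority,                       // `Priority` of the current task context
//             2,                               // ceiling of the resource
//             NVIC_PRIO_BITS,                  // device-specific priority bits
//             &masks,                          // unused on BASEPRI targets
//             |shared| {
//                 *shared += 1;
//                 *shared
//             },
//         )
//     };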

/// Lock implementation using interrupt masking
///
/// # Safety
///
/// The system ceiling is raised from current to ceiling
/// by computing a 32 bit `mask` (1 bit per interrupt):
/// - 1: ceiling >= priority > current
/// - 0: else
///
/// On CS entry, `clear_enable_mask(mask)` disables interrupts
/// On CS exit, `set_enable_mask(mask)` re-enables interrupts
///
/// The priority.set/priority.get can safely be outside the CS
/// as being a context local cell (not affected by preemptions).
/// It is merely used in order to omit masking in case
/// current priority >= ceiling.
///
/// Dereferencing a raw pointer is done safely inside the CS
///
/// Lock Efficiency:
/// Early experiments validate (sub)-zero cost for the CS implementation,
/// (sub)-zero as in:
/// - Either zero OH (lock optimized out), or
/// - Amounting to an optimal assembly implementation
///   - if ceiling == (1 << nvic_prio_bits)
///     - we execute the closure in a global critical section (interrupt free)
///     - CS entry cost, single write to core register
///     - CS exit cost, single write to core register
///   - else
///     - The `mask` value is folded to a constant at compile time
///     - CS entry, single write of the 32 bit `mask` to the `icer` register
///     - CS exit, single write of the 32 bit `mask` to the `iser` register
///   - priority.set/get optimized out (their effect not)
/// - On par or better than any handwritten implementation of SRP
///
/// Limitations:
/// The current implementation does not allow tasks with shared resources
/// to be bound to exception handlers, as these cannot be masked in HW.
///
/// Possible solutions:
/// - Mask exceptions by global critical sections (interrupt::free)
/// - Temporarily lower exception priority
///
/// These possible solutions are set goals for future work
#[cfg(not(have_basepri))]
#[inline(always)]
pub unsafe fn lock<T, R, const M: usize>(
    ptr: *mut T,
    priority: &Priority,
    ceiling: u8,
    _nvic_prio_bits: u8,
    masks: &[Mask<M>; 3],
    f: impl FnOnce(&mut T) -> R,
) -> R {
    let current = priority.get();
    if current < ceiling {
        if ceiling >= 4 {
            // safe to manipulate outside critical section
            priority.set(ceiling);
            // execute closure under protection of raised system ceiling
            let r = interrupt::free(|_| f(&mut *ptr));
            // safe to manipulate outside critical section
            priority.set(current);
            r
        } else {
            // safe to manipulate outside critical section
            priority.set(ceiling);
            let mask = compute_mask(current, ceiling, masks);
            clear_enable_mask(mask);
            // execute closure under protection of raised system ceiling
            let r = f(&mut *ptr);
            set_enable_mask(mask);
            // safe to manipulate outside critical section
            priority.set(current);
            r
        }
    } else {
        // execute closure without raising system ceiling
        f(&mut *ptr)
    }
}

#[cfg(not(have_basepri))]
#[inline(always)]
fn compute_mask<const M: usize>(from_prio: u8, to_prio: u8, masks: &[Mask<M>; 3]) -> Mask<M> {
    let mut res = Mask([0; M]);
    masks[from_prio as usize..to_prio as usize]
        .iter()
        .for_each(|m| res |= *m);
    res
}
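
// Worked example (illustrative): if `masks == [Mask([0b001]), Mask([0b010]), Mask([0b100])]`
// (one enable bit per interrupt at priorities 1, 2 and 3), then `compute_mask(0, 2, &masks)`
// ORs the first two entries and yields `Mask([0b011])`: exactly the interrupts that must be
// disabled to raise the system ceiling from 0 to 2.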

// enables interrupts
#[cfg(not(have_basepri))]
#[inline(always)]
unsafe fn set_enable_mask<const M: usize>(mask: Mask<M>) {
    for i in 0..M {
        // This check should involve compile time constants and be optimized out.
        if mask.0[i] != 0 {
            (*NVIC::PTR).iser[i].write(mask.0[i]);
        }
    }
}

// disables interrupts
#[cfg(not(have_basepri))]
#[inline(always)]
unsafe fn clear_enable_mask<const M: usize>(mask: Mask<M>) {
    for i in 0..M {
        // This check should involve compile time constants and be optimized out.
        if mask.0[i] != 0 {
            (*NVIC::PTR).icer[i].write(mask.0[i]);
        }
    }
}

#[inline]
#[must_use]
pub fn logical2hw(logical: u8, nvic_prio_bits: u8) -> u8 {
    ((1 << nvic_prio_bits) - logical) << (8 - nvic_prio_bits)
}
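
// Worked example: with 4 priority bits, logical priority 1 (lowest) maps to hardware level
// ((1 << 4) - 1) << 4 == 0xF0, and logical priority 16 (highest) maps to
// ((1 << 4) - 16) << 4 == 0x00 (lower hardware values preempt higher ones on Cortex-M).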

#[cfg(have_basepri)]
pub const fn create_mask<const N: usize, const M: usize>(_: [u32; N]) -> Mask<M> {
    Mask([0; M])
}

#[cfg(not(have_basepri))]
pub const fn create_mask<const N: usize, const M: usize>(list_of_shifts: [u32; N]) -> Mask<M> {
    let mut mask = Mask([0; M]);
    let mut i = 0;

    while i < N {
        let shift = list_of_shifts[i];
        i += 1;

        mask = mask.set_bit(shift);
    }

    mask
}
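
// Worked example (illustrative): on a single-chunk target (`M = 1`), passing the interrupt
// numbers 2, 5 and 7 sets the corresponding bits:
//
//     const MASK: Mask<1> = create_mask([2, 5, 7]);
//     // MASK.0 == [0b1010_0100]  (0x0000_00A4)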

#[cfg(have_basepri)]
pub const fn compute_mask_chunks<const L: usize>(_: [u32; L]) -> usize {
    0
}

/// Compute the number of u32 chunks needed to store the Mask value.
/// On M0, M0+ this should always end up being 1.
/// On M23 we will pick a number that allows us to store the highest index used by the code.
/// This means the amount of overhead will vary based on the interrupts actually used by the code.
#[cfg(not(have_basepri))]
pub const fn compute_mask_chunks<const L: usize>(ids: [u32; L]) -> usize {
    let mut max: usize = 0;
    let mut i = 0;

    while i < L {
        let id = ids[i] as usize;
        i += 1;

        if id > max {
            max = id;
        }
    }

    (max + 32) / 32
}
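
// Worked example: `compute_mask_chunks([1, 30])` returns (30 + 32) / 32 == 1 (one u32 chunk),
// while `compute_mask_chunks([1, 47])` returns (47 + 32) / 32 == 2, since interrupt 47 lives
// in the second 32-bit chunk.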

#[cfg(have_basepri)]
pub const fn no_basepri_panic() {
    // For non-v6 all is fine
}

#[cfg(not(have_basepri))]
pub const fn no_basepri_panic() {
    panic!("Exceptions with shared resources are not allowed when compiling for thumbv6 or thumbv8m.base. Use local resources or `#[lock_free]` shared resources");
}