pub use bare_metal::CriticalSection;
pub use cortex_m::{
    asm::nop,
    asm::wfi,
    interrupt,
    peripheral::{scb::SystemHandler, DWT, NVIC, SCB, SYST},
    Peripherals,
};

//pub use portable_atomic as atomic;
pub use atomic_polyfill as atomic;

pub mod executor;

/// Mask is used to store interrupt masks on systems without a BASEPRI register (M0, M0+, M23).
/// It needs to be large enough to cover all the relevant interrupts in use.
/// For M0/M0+ there are only 32 interrupts so we only need one u32 value.
/// For M23 there can be as many as 480 interrupts.
/// Rather than providing space for all possible interrupts, we just detect the highest interrupt in
/// use at compile time and allocate enough u32 chunks to cover them.
#[derive(Copy, Clone)]
pub struct Mask<const M: usize>([u32; M]);

impl<const M: usize> core::ops::BitOrAssign for Mask<M> {
    fn bitor_assign(&mut self, rhs: Self) {
        for i in 0..M {
            self.0[i] |= rhs.0[i];
        }
    }
}

#[cfg(not(have_basepri))]
impl<const M: usize> Mask<M> {
    /// Set a bit inside a Mask.
    const fn set_bit(mut self, bit: u32) -> Self {
        let block = bit / 32;

        if block as usize >= M {
            panic!("Generating masks for thumbv6/thumbv8m.base failed! Are you compiling for thumbv6 on a thumbv7 MCU or using an unsupported thumbv8m.base MCU?");
        }

        let offset = bit - (block * 32);
        self.0[block as usize] |= 1 << offset;
        self
    }
}
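
// A minimal sketch (hypothetical host-side test) of the block/offset math in
// `set_bit`: bit 3 lands in chunk 0, bit 37 in chunk 1 at offset 5.
#[cfg(all(test, not(have_basepri)))]
mod mask_tests {
    use super::Mask;

    #[test]
    fn set_bit_selects_chunk_and_offset() {
        let mask = Mask::<2>([0; 2]).set_bit(3).set_bit(37);
        assert_eq!(mask.0[0], 1 << 3);
        assert_eq!(mask.0[1], 1 << 5);
    }
}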

#[cfg(have_basepri)]
use cortex_m::register::basepri;

#[cfg(have_basepri)]
#[inline(always)]
pub fn run<F>(priority: u8, f: F)
where
    F: FnOnce(),
{
    if priority == 1 {
        // If the priority of this interrupt is `1` then BASEPRI can only be `0`
        f();
        unsafe { basepri::write(0) }
    } else {
        let initial = basepri::read();
        f();
        unsafe { basepri::write(initial) }
    }
}

#[cfg(not(have_basepri))]
#[inline(always)]
pub fn run<F>(_priority: u8, f: F)
where
    F: FnOnce(),
{
    f();
}

/// Const helper to check whether the target architecture has the BASEPRI register.
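///
/// A possible compile-time use (sketch; the constant name is illustrative):
///
/// ```ignore
/// const HAS_BASEPRI: bool = have_basepri();
/// ```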
pub const fn have_basepri() -> bool {
    #[cfg(have_basepri)]
    {
        true
    }

    #[cfg(not(have_basepri))]
    {
        false
    }
}

#[inline(always)]
pub fn assert_send<T>()
where
    T: Send,
{
}

#[inline(always)]
pub fn assert_sync<T>()
where
    T: Sync,
{
}

/// Lock implementation using BASEPRI and global Critical Section (CS)
///
/// # Safety
///
/// The system ceiling is raised from current to ceiling
/// by either
/// - raising the BASEPRI to the ceiling value, or
/// - disabling all interrupts in case we want to
///   mask interrupts with maximum priority
///
/// Dereferencing a raw pointer is done safely inside the CS
///
/// The priority.set/priority.get can safely be outside the CS
/// as being a context local cell (not affected by preemptions).
/// It is merely used in order to omit masking in case the
/// current priority >= ceiling.
///
/// Lock Efficiency:
/// Experiments validate (sub)-zero cost for CS implementation
/// (Sub)-zero as:
/// - Either zero OH (lock optimized out), or
/// - Amounting to an optimal assembly implementation
///   - The BASEPRI value is folded to a constant at compile time
///   - CS entry, single assembly instruction to write BASEPRI
///   - CS exit, single assembly instruction to write BASEPRI
///   - priority.set/get optimized out (their effect not)
/// - On par or better than any handwritten implementation of SRP
///
/// Limitations:
/// The current implementation reads/writes BASEPRI once
/// even in some edge cases where this may be omitted.
/// Total OH per task is at most 2 clock cycles, negligible in practice
/// but can in theory be fixed.
///
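/// A hedged usage sketch (in practice this call is emitted by the RTIC
/// macros; the resource, ceiling, and mask names below are illustrative):
///
/// ```ignore
/// static mut SHARED: u32 = 0;
/// // Run the closure with the system ceiling raised to logical priority 2,
/// // assuming 4 NVIC priority bits; the mask argument is unused here.
/// let new = unsafe {
///     lock(core::ptr::addr_of_mut!(SHARED), 2, 4, &MASKS, |shared| {
///         *shared += 1;
///         *shared
///     })
/// };
/// ```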
#[cfg(have_basepri)]
#[inline(always)]
pub unsafe fn lock<T, R, const M: usize>(
    ptr: *mut T,
    ceiling: u8,
    nvic_prio_bits: u8,
    _mask: &[Mask<M>; 3],
    f: impl FnOnce(&mut T) -> R,
) -> R {
    if ceiling == (1 << nvic_prio_bits) {
        // The ceiling is the maximum priority; mask all interrupts globally.
        interrupt::free(|_| f(&mut *ptr))
    } else {
        let current = basepri::read();

        basepri::write(logical2hw(ceiling, nvic_prio_bits));
        let r = f(&mut *ptr);
        basepri::write(current);

        r
    }
}

/// Lock implementation using interrupt masking
///
/// # Safety
///
/// The system ceiling is raised from current to ceiling
/// by computing a `mask` (1 bit per interrupt, in one or more u32 chunks):
///   1: ceiling >= priority > current
///   0: else
///
/// On CS entry, `clear_enable_mask(mask)` disables interrupts
/// On CS exit, `set_enable_mask(mask)` re-enables interrupts
///
/// The priority.set/priority.get can safely be outside the CS
/// as being a context local cell (not affected by preemptions).
/// It is merely used in order to omit masking in case
/// current priority >= ceiling.
///
/// Dereferencing a raw pointer is done safely inside the CS
///
/// Lock Efficiency:
/// Early experiments validate (sub)-zero cost for CS implementation
/// (Sub)-zero as:
/// - Either zero OH (lock optimized out), or
/// - Amounting to an optimal assembly implementation
///   - If ceiling == (1 << nvic_prio_bits)
///     - we execute the closure in a global critical section (interrupt free)
///     - CS entry cost, single write to core register
///     - CS exit cost, single write to core register
///   - Else
///     - The `mask` value is folded to a constant at compile time
///     - CS entry, single write of the 32 bit `mask` to the `icer` register
///     - CS exit, single write of the 32 bit `mask` to the `iser` register
///   - priority.set/get optimized out (their effect not)
/// - On par or better than any handwritten implementation of SRP
///
/// Limitations:
/// The current implementation does not allow for tasks with shared resources
/// to be bound to exception handlers, as these cannot be masked in HW.
///
/// Possible solutions:
/// - Mask exceptions by global critical sections (interrupt::free)
/// - Temporarily lower exception priority
///
/// These possible solutions are set goals for future work
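///
/// A hedged usage sketch (normally this call is emitted by the RTIC macros;
/// the resource and mask names are illustrative):
///
/// ```ignore
/// static mut SHARED: u32 = 0;
/// // With ceiling 2, the NVIC enable bits collected in `MASKS[0..2]` are
/// // cleared around the closure and restored afterwards.
/// let new = unsafe {
///     lock(core::ptr::addr_of_mut!(SHARED), 2, 4, &MASKS, |shared| {
///         *shared += 1;
///         *shared
///     })
/// };
/// ```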
#[cfg(not(have_basepri))]
#[inline(always)]
pub unsafe fn lock<T, R, const M: usize>(
    ptr: *mut T,
    ceiling: u8,
    _nvic_prio_bits: u8,
    masks: &[Mask<M>; 3],
    f: impl FnOnce(&mut T) -> R,
) -> R {
    if ceiling >= 4 {
        // Ceiling `4` (the maximum logical priority here) cannot be reached by
        // masking individual interrupts; use a global critical section instead.
        interrupt::free(|_| f(&mut *ptr))
    } else {
        // safe to manipulate outside critical section
        let mask = compute_mask(0, ceiling, masks);
        clear_enable_mask(mask);

        // execute closure under protection of raised system ceiling
        let r = f(&mut *ptr);

        set_enable_mask(mask);

        // safe to manipulate outside critical section
        r
    }
}

#[cfg(not(have_basepri))]
#[inline(always)]
fn compute_mask<const M: usize>(from_prio: u8, to_prio: u8, masks: &[Mask<M>; 3]) -> Mask<M> {
    let mut res = Mask([0; M]);
    masks[from_prio as usize..to_prio as usize]
        .iter()
        .for_each(|m| res |= *m);
    res
}
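
// A minimal sketch (hypothetical host-side test): `compute_mask` ORs together
// the per-priority masks for logical priorities in `from_prio..to_prio`.
#[cfg(all(test, not(have_basepri)))]
mod compute_mask_tests {
    use super::{compute_mask, Mask};

    #[test]
    fn folds_per_priority_masks() {
        // One single-chunk mask per logical priority 1..=3.
        let masks = [Mask([0b001]), Mask([0b010]), Mask([0b100])];
        // Raising the ceiling from 0 to 2 collects the masks of priorities 1 and 2.
        assert_eq!(compute_mask(0, 2, &masks).0[0], 0b011);
    }
}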

// enables interrupts
#[cfg(not(have_basepri))]
#[inline(always)]
unsafe fn set_enable_mask<const M: usize>(mask: Mask<M>) {
    for i in 0..M {
        // This check should involve compile time constants and be optimized out.
        if mask.0[i] != 0 {
            (*NVIC::PTR).iser[i].write(mask.0[i]);
        }
    }
}

// disables interrupts
#[cfg(not(have_basepri))]
#[inline(always)]
unsafe fn clear_enable_mask<const M: usize>(mask: Mask<M>) {
    for i in 0..M {
        // This check should involve compile time constants and be optimized out.
        if mask.0[i] != 0 {
            (*NVIC::PTR).icer[i].write(mask.0[i]);
        }
    }
}

#[inline]
#[must_use]
pub fn logical2hw(logical: u8, nvic_prio_bits: u8) -> u8 {
    ((1 << nvic_prio_bits) - logical) << (8 - nvic_prio_bits)
}
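
// Worked example (hypothetical host-side test): with 4 NVIC priority bits,
// logical priority 1 (lowest) maps to hardware level (16 - 1) << 4 == 240,
// and the maximum logical priority 16 maps to hardware level 0.
#[cfg(test)]
mod logical2hw_tests {
    use super::logical2hw;

    #[test]
    fn maps_logical_to_hardware_priority() {
        assert_eq!(logical2hw(1, 4), 240);
        assert_eq!(logical2hw(16, 4), 0);
    }
}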

#[cfg(have_basepri)]
pub const fn create_mask<const N: usize, const M: usize>(_: [u32; N]) -> Mask<M> {
    Mask([0; M])
}

#[cfg(not(have_basepri))]
pub const fn create_mask<const N: usize, const M: usize>(list_of_shifts: [u32; N]) -> Mask<M> {
    let mut mask = Mask([0; M]);

    let mut i = 0;
    while i < N {
        let shift = list_of_shifts[i];
        i += 1;

        mask = mask.set_bit(shift);
    }

    mask
}
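
// A small sketch (hypothetical interrupt numbers): `create_mask` folds a list
// of interrupt numbers into chunked bits, and being const it is evaluated at
// compile time.
#[cfg(all(test, not(have_basepri)))]
mod create_mask_tests {
    use super::{create_mask, Mask};

    #[test]
    fn collects_interrupt_numbers_into_chunks() {
        // Interrupt 0 sets bit 0 of chunk 0; interrupt 33 sets bit 1 of chunk 1.
        const MASK: Mask<2> = create_mask([0, 33]);
        assert_eq!(MASK.0, [1 << 0, 1 << 1]);
    }
}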

#[cfg(have_basepri)]
pub const fn compute_mask_chunks<const L: usize>(_: [u32; L]) -> usize {
    0
}

/// Compute the number of u32 chunks needed to store the Mask value.
/// On M0, M0+ this should always end up being 1.
/// On M23 we will pick a number that allows us to store the highest index used by the code.
/// This means the amount of overhead will vary based on the actual interrupts used by the code.
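///
/// A small worked example (hypothetical interrupt ids):
///
/// ```ignore
/// // The highest id used is 37, so (37 + 32) / 32 == 2 chunks are needed.
/// const CHUNKS: usize = compute_mask_chunks([1, 9, 37]);
/// ```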
#[cfg(not(have_basepri))]
pub const fn compute_mask_chunks<const L: usize>(ids: [u32; L]) -> usize {
    let mut max: usize = 0;
    let mut i = 0;
    while i < L {
        let id = ids[i] as usize;
        i += 1;

        if id > max {
            max = id;
        }
    }
    (max + 32) / 32
}

#[cfg(have_basepri)]
pub const fn no_basepri_panic() {
    // For non-v6 all is fine
}

#[cfg(not(have_basepri))]
pub const fn no_basepri_panic() {
    panic!("Exceptions with shared resources are not allowed when compiling for thumbv6 or thumbv8m.base. Use local resources or `#[lock_free]` shared resources");
}