Break out core specific codegen to bindings

Emil Fresk 2023-02-11 08:55:19 +01:00 committed by Henrik Tjäder
parent 1cda61fbda
commit 60f0342b69
16 changed files with 654 additions and 622 deletions

View file

@ -24,6 +24,7 @@ proc-macro = true
[features]
default = []
debugprint = []
# list of supported codegen backends
thumbv6 = []
thumbv7 = []

View file

@ -1 +0,0 @@

View file

@ -1,70 +1,7 @@
use crate::codegen::bindings::architecture_specific_analysis;
use crate::syntax::{analyze::Analysis, ast::App};
use syn::parse;

pub fn app(app: &App, analysis: &Analysis) -> parse::Result<()> {
architecture_specific_analysis(app, analysis)
}

use std::collections::HashSet;
use crate::syntax::ast::App;
use syn::parse;

pub fn app(app: &App) -> parse::Result<()> {
// Check that external (device-specific) interrupts are not named after known (Cortex-M)
// exceptions
for name in app.args.dispatchers.keys() {
let name_s = name.to_string();
match &*name_s {
"NonMaskableInt" | "HardFault" | "MemoryManagement" | "BusFault" | "UsageFault"
| "SecureFault" | "SVCall" | "DebugMonitor" | "PendSV" | "SysTick" => {
return Err(parse::Error::new(
name.span(),
"Cortex-M exceptions can't be used as `extern` interrupts",
));
}
_ => {}
}
}
// Check that there are enough external interrupts to dispatch the software tasks and the timer
// queue handler
let mut first = None;
let priorities = app
.software_tasks
.iter()
.map(|(name, task)| {
first = Some(name);
task.args.priority
})
.filter(|prio| *prio > 0)
.collect::<HashSet<_>>();
let need = priorities.len();
let given = app.args.dispatchers.len();
if need > given {
let s = {
format!(
"not enough interrupts to dispatch \
all software tasks (need: {need}; given: {given})"
)
};
// If not enough tasks and first still is None, may cause
// "custom attribute panicked" due to unwrap on None
return Err(parse::Error::new(first.unwrap().span(), s));
}
// Check that all exceptions are valid; only exceptions with configurable priorities are
// accepted
for (name, task) in &app.hardware_tasks {
let name_s = task.args.binds.to_string();
match &*name_s {
"NonMaskableInt" | "HardFault" => {
return Err(parse::Error::new(
name.span(),
"only exceptions with configurable priority can be used as hardware tasks",
));
}
_ => {}
}
}
Ok(())
}

View file

@ -4,6 +4,8 @@ use quote::quote;
use crate::analyze::Analysis;
use crate::syntax::ast::App;

pub mod bindings;

mod assertions;
mod async_dispatchers;
mod hardware_tasks;

View file

@ -1,8 +1,9 @@
use proc_macro2::TokenStream as TokenStream2;
use quote::quote;

use super::bindings::extra_assertions;
use crate::analyze::Analysis;
use crate::syntax::ast::App;
use crate::{analyze::Analysis, codegen::util};

/// Generates compile-time assertions that check that types implement the `Send` / `Sync` traits
pub fn codegen(app: &App, analysis: &Analysis) -> Vec<TokenStream2> {
@ -16,38 +17,7 @@ pub fn codegen(app: &App, analysis: &Analysis) -> Vec<TokenStream2> {
stmts.push(quote!(rtic::export::assert_sync::<#ty>();));
}

stmts.append(&mut extra_assertions(app, analysis));

let device = &app.args.device;
let chunks_name = util::priority_mask_chunks_ident();
let no_basepri_checks: Vec<_> = app
.hardware_tasks
.iter()
.filter_map(|(_, task)| {
if !util::is_exception(&task.args.binds) {
let interrupt_name = &task.args.binds;
Some(quote!(
if (#device::Interrupt::#interrupt_name as usize) >= (#chunks_name * 32) {
::core::panic!("An interrupt out of range is used while in armv6 or armv8m.base");
}
))
} else {
None
}
})
.collect();
let const_check = quote! {
const _CONST_CHECK: () = {
if !rtic::export::have_basepri() {
#(#no_basepri_checks)*
} else {
// TODO: Add armv7 checks here
}
};
let _ = _CONST_CHECK;
};
stmts.push(const_check);
stmts
}

View file

@ -0,0 +1,5 @@
// TODO: Feature gate
mod cortex;
// TODO: Feature gate
pub use cortex::*;

View file

@ -0,0 +1,346 @@
use crate::{
analyze::Analysis as CodegenAnalysis,
codegen::util,
syntax::{analyze::Analysis as SyntaxAnalysis, ast::App},
};
use proc_macro2::TokenStream as TokenStream2;
use quote::quote;
use std::collections::HashSet;
use syn::{parse, Attribute, Ident};
// TODO: This should be feature gated
// pub use basepri::*;
pub use source_masking::*;
/// Whether `name` is an exception with configurable priority
fn is_exception(name: &Ident) -> bool {
let s = name.to_string();
matches!(
&*s,
"MemoryManagement"
| "BusFault"
| "UsageFault"
| "SecureFault"
| "SVCall"
| "DebugMonitor"
| "PendSV"
| "SysTick"
)
}
pub mod source_masking {
use super::*;
use std::collections::HashMap;
/// Generates a `Mutex` implementation
pub fn impl_mutex(
app: &App,
analysis: &CodegenAnalysis,
cfgs: &[Attribute],
resources_prefix: bool,
name: &Ident,
ty: &TokenStream2,
ceiling: u8,
ptr: &TokenStream2,
) -> TokenStream2 {
let path = if resources_prefix {
quote!(shared_resources::#name)
} else {
quote!(#name)
};
// Computing mapping of used interrupts to masks
let interrupt_ids = analysis.interrupts.iter().map(|(p, (id, _))| (p, id));
let mut prio_to_masks = HashMap::new();
let device = &app.args.device;
// let mut uses_exceptions_with_resources = false;
let mut mask_ids = Vec::new();
for (&priority, name) in interrupt_ids.chain(app.hardware_tasks.values().flat_map(|task| {
if !is_exception(&task.args.binds) {
Some((&task.args.priority, &task.args.binds))
} else {
None
}
})) {
let v: &mut Vec<_> = prio_to_masks.entry(priority - 1).or_default();
v.push(quote!(#device::Interrupt::#name as u32));
mask_ids.push(quote!(#device::Interrupt::#name as u32));
}
// Call rtic::export::create_mask([Mask; N]), where the array is the list of shifts
let mut mask_arr = Vec::new();
// NOTE: 0..3 assumes max 4 priority levels according to M0, M23 spec
for i in 0..3 {
let v = if let Some(v) = prio_to_masks.get(&i) {
v.clone()
} else {
Vec::new()
};
mask_arr.push(quote!(
rtic::export::create_mask([#(#v),*])
));
}
// if uses_exceptions_with_resources {
// mod_app.push(quote!(
// #[doc(hidden)]
// #[allow(non_upper_case_globals)]
// const __rtic_internal_V6_ERROR: () = rtic::export::no_basepri_panic();
// ));
// }
quote!(
#(#cfgs)*
impl<'a> rtic::Mutex for #path<'a> {
type T = #ty;
#[inline(always)]
fn lock<RTIC_INTERNAL_R>(&mut self, f: impl FnOnce(&mut #ty) -> RTIC_INTERNAL_R) -> RTIC_INTERNAL_R {
/// Priority ceiling
const CEILING: u8 = #ceiling;
const N_CHUNKS: usize = rtic::export::compute_mask_chunks([#(#mask_ids),*]);
const MASKS: [rtic::export::Mask<N_CHUNKS>; 3] = [#(#mask_arr),*];
unsafe {
rtic::export::lock(
#ptr,
CEILING,
&MASKS,
f,
)
}
}
}
)
}
pub fn extra_assertions(_: &App, _: &SyntaxAnalysis) -> Vec<TokenStream2> {
// let device = &app.args.device;
// let no_basepri_checks: Vec<_> = app
// .hardware_tasks
// .iter()
// .filter_map(|(_, task)| {
// if !is_exception(&task.args.binds) {
// let interrupt_name = &task.args.binds;
// Some(quote!(
// if (#device::Interrupt::#interrupt_name as usize) >= (#chunks_name * 32) {
// ::core::panic!("An interrupt out of range is used while in armv6 or armv8m.base");
// }
// ))
// } else {
// None
// }
// })
// .collect();
// let const_check = quote! {
// const _CONST_CHECK: () = {
// #(#no_basepri_checks)*
// };
// let _ = _CONST_CHECK;
// };
// vec![const_check]
vec![]
}
}
pub mod basepri {
use super::*;
/// Generates a `Mutex` implementation
pub fn impl_mutex(
app: &App,
_analysis: &CodegenAnalysis,
cfgs: &[Attribute],
resources_prefix: bool,
name: &Ident,
ty: &TokenStream2,
ceiling: u8,
ptr: &TokenStream2,
) -> TokenStream2 {
let path = if resources_prefix {
quote!(shared_resources::#name)
} else {
quote!(#name)
};
let device = &app.args.device;
quote!(
#(#cfgs)*
impl<'a> rtic::Mutex for #path<'a> {
type T = #ty;
#[inline(always)]
fn lock<RTIC_INTERNAL_R>(&mut self, f: impl FnOnce(&mut #ty) -> RTIC_INTERNAL_R) -> RTIC_INTERNAL_R {
/// Priority ceiling
const CEILING: u8 = #ceiling;
unsafe {
rtic::export::lock(
#ptr,
CEILING,
#device::NVIC_PRIO_BITS,
f,
)
}
}
}
)
}
pub fn extra_assertions(_: &App, _: &SyntaxAnalysis) -> Vec<TokenStream2> {
vec![]
}
}
pub fn pre_init_checks(app: &App, _: &SyntaxAnalysis) -> Vec<TokenStream2> {
let mut stmts = vec![];
// check that all dispatchers exist in the `Interrupt` enumeration regardless of whether
// they are used or not
let interrupt = util::interrupt_ident();
let rt_err = util::rt_err_ident();
for name in app.args.dispatchers.keys() {
stmts.push(quote!(let _ = #rt_err::#interrupt::#name;));
}
stmts
}
pub fn pre_init_enable_interrupts(app: &App, analysis: &CodegenAnalysis) -> Vec<TokenStream2> {
let mut stmts = vec![];
let interrupt = util::interrupt_ident();
let rt_err = util::rt_err_ident();
let device = &app.args.device;
let nvic_prio_bits = quote!(#device::NVIC_PRIO_BITS);
let interrupt_ids = analysis.interrupts.iter().map(|(p, (id, _))| (p, id));
// Unmask interrupts and set their priorities
for (&priority, name) in interrupt_ids.chain(app.hardware_tasks.values().filter_map(|task| {
if is_exception(&task.args.binds) {
// We do exceptions in another pass
None
} else {
Some((&task.args.priority, &task.args.binds))
}
})) {
let es = format!(
"Maximum priority used by interrupt vector '{name}' is more than supported by hardware"
);
// Compile time assert that this priority is supported by the device
stmts.push(quote!(
const _: () = if (1 << #nvic_prio_bits) < #priority as usize { ::core::panic!(#es); };
));
stmts.push(quote!(
core.NVIC.set_priority(
#rt_err::#interrupt::#name,
rtic::export::logical2hw(#priority, #nvic_prio_bits),
);
));
// NOTE unmask the interrupt *after* setting its priority: changing the priority of a pended
// interrupt is implementation defined
stmts.push(quote!(rtic::export::NVIC::unmask(#rt_err::#interrupt::#name);));
}
// Set exception priorities
for (name, priority) in app.hardware_tasks.values().filter_map(|task| {
if is_exception(&task.args.binds) {
Some((&task.args.binds, task.args.priority))
} else {
None
}
}) {
let es = format!(
"Maximum priority used by interrupt vector '{name}' is more than supported by hardware"
);
// Compile time assert that this priority is supported by the device
stmts.push(quote!(
const _: () = if (1 << #nvic_prio_bits) < #priority as usize { ::core::panic!(#es); };
));
stmts.push(quote!(core.SCB.set_priority(
rtic::export::SystemHandler::#name,
rtic::export::logical2hw(#priority, #nvic_prio_bits),
);));
}
stmts
}
pub fn architecture_specific_analysis(app: &App, _: &SyntaxAnalysis) -> parse::Result<()> {
// Check that external (device-specific) interrupts are not named after known (Cortex-M)
// exceptions
for name in app.args.dispatchers.keys() {
let name_s = name.to_string();
match &*name_s {
"NonMaskableInt" | "HardFault" | "MemoryManagement" | "BusFault" | "UsageFault"
| "SecureFault" | "SVCall" | "DebugMonitor" | "PendSV" | "SysTick" => {
return Err(parse::Error::new(
name.span(),
"Cortex-M exceptions can't be used as `extern` interrupts",
));
}
_ => {}
}
}
// Check that there are enough external interrupts to dispatch the software tasks and the timer
// queue handler
let mut first = None;
let priorities = app
.software_tasks
.iter()
.map(|(name, task)| {
first = Some(name);
task.args.priority
})
.filter(|prio| *prio > 0)
.collect::<HashSet<_>>();
let need = priorities.len();
let given = app.args.dispatchers.len();
if need > given {
let s = {
format!(
"not enough interrupts to dispatch \
all software tasks (need: {need}; given: {given})"
)
};
// If not enough tasks and first still is None, may cause
// "custom attribute panicked" due to unwrap on None
return Err(parse::Error::new(first.unwrap().span(), s));
}
// Check that all exceptions are valid; only exceptions with configurable priorities are
// accepted
for (name, task) in &app.hardware_tasks {
let name_s = task.args.binds.to_string();
match &*name_s {
"NonMaskableInt" | "HardFault" => {
return Err(parse::Error::new(
name.span(),
"only exceptions with configurable priority can be used as hardware tasks",
));
}
_ => {}
}
}
Ok(())
}
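
For orientation, the grouping that `impl_mutex` performs above can be sketched on its own: interrupt IDs are bucketed by `priority - 1`, and each bucket later becomes one `create_mask(...)` entry in the generated `MASKS` array. The sketch below is illustrative only; the IDs and priorities are made up, and plain `u32`s stand in for the generated `#device::Interrupt::#name as u32` tokens.

use std::collections::HashMap;

fn main() {
    // (logical priority, interrupt ID) pairs -- hypothetical values for illustration.
    let interrupts = [(1u8, 7u32), (2, 11), (2, 13), (3, 20)];

    // Bucket IDs by `priority - 1`, mirroring the `prio_to_masks` map in `impl_mutex`.
    let mut prio_to_masks: HashMap<u8, Vec<u32>> = HashMap::new();
    for (priority, id) in interrupts {
        prio_to_masks.entry(priority - 1).or_default().push(id);
    }

    // One slot per maskable priority level (index 0..3), as in `MASKS: [Mask<N_CHUNKS>; 3]`;
    // levels without any interrupt get an empty list.
    for level in 0..3u8 {
        let ids = prio_to_masks.get(&level).cloned().unwrap_or_default();
        println!("priority {}: create_mask({:?})", level + 1, ids);
    }
}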

View file

@ -17,7 +17,7 @@ pub fn codegen(ctxt: Context, app: &App, analysis: &Analysis) -> TokenStream2 {
match ctxt {
Context::Init => {
fields.push(quote!(
/// Core (Cortex-M) peripherals
/// Core peripherals
pub core: rtic::export::Peripherals
));
@ -25,7 +25,7 @@ pub fn codegen(ctxt: Context, app: &App, analysis: &Analysis) -> TokenStream2 {
let device = &app.args.device;
fields.push(quote!(
/// Device peripherals
/// Device peripherals (PAC)
pub device: #device::Peripherals
));

View file

@ -1,15 +1,13 @@
use super::bindings::{pre_init_checks, pre_init_enable_interrupts};
use crate::analyze::Analysis;
use crate::syntax::ast::App;
use proc_macro2::TokenStream as TokenStream2;
use quote::quote;
use crate::{analyze::Analysis, codegen::util};

/// Generates code that runs before `#[init]`
pub fn codegen(app: &App, analysis: &Analysis) -> Vec<TokenStream2> {
let mut stmts = vec![];
let rt_err = util::rt_err_ident();

// Disable interrupts -- `init` must run with interrupts disabled
stmts.push(quote!(rtic::export::interrupt::disable();));
@ -18,68 +16,9 @@ pub fn codegen(app: &App, analysis: &Analysis) -> Vec<TokenStream2> {
let mut core: rtic::export::Peripherals = rtic::export::Peripherals::steal().into();
));

stmts.append(&mut pre_init_checks(app, analysis));
stmts.append(&mut pre_init_enable_interrupts(app, analysis));

let device = &app.args.device;
let nvic_prio_bits = quote!(#device::NVIC_PRIO_BITS);
// check that all dispatchers exists in the `Interrupt` enumeration regardless of whether
// they are used or not
let interrupt = util::interrupt_ident();
for name in app.args.dispatchers.keys() {
stmts.push(quote!(let _ = #rt_err::#interrupt::#name;));
}
let interrupt_ids = analysis.interrupts.iter().map(|(p, (id, _))| (p, id));
// Unmask interrupts and set their priorities
for (&priority, name) in interrupt_ids.chain(app.hardware_tasks.values().filter_map(|task| {
if util::is_exception(&task.args.binds) {
// We do exceptions in another pass
None
} else {
Some((&task.args.priority, &task.args.binds))
}
})) {
let es = format!(
"Maximum priority used by interrupt vector '{name}' is more than supported by hardware"
);
// Compile time assert that this priority is supported by the device
stmts.push(quote!(
const _: () = if (1 << #nvic_prio_bits) < #priority as usize { ::core::panic!(#es); };
));
stmts.push(quote!(
core.NVIC.set_priority(
#rt_err::#interrupt::#name,
rtic::export::logical2hw(#priority, #nvic_prio_bits),
);
));
// NOTE unmask the interrupt *after* setting its priority: changing the priority of a pended
// interrupt is implementation defined
stmts.push(quote!(rtic::export::NVIC::unmask(#rt_err::#interrupt::#name);));
}
// Set exception priorities
for (name, priority) in app.hardware_tasks.values().filter_map(|task| {
if util::is_exception(&task.args.binds) {
Some((&task.args.binds, task.args.priority))
} else {
None
}
}) {
let es = format!(
"Maximum priority used by interrupt vector '{name}' is more than supported by hardware"
);
// Compile time assert that this priority is supported by the device
stmts.push(quote!(
const _: () = if (1 << #nvic_prio_bits) < #priority as usize { ::core::panic!(#es); };
));
stmts.push(quote!(core.SCB.set_priority(
rtic::export::SystemHandler::#name,
rtic::export::logical2hw(#priority, #nvic_prio_bits),
);));
}
stmts
}

View file

@ -2,7 +2,8 @@ use crate::syntax::{analyze::Ownership, ast::App};
use crate::{analyze::Analysis, codegen::util};
use proc_macro2::TokenStream as TokenStream2;
use quote::quote;
use std::collections::HashMap;

use super::bindings::impl_mutex;

/// Generates `static` variables and shared resource proxies
pub fn codegen(app: &App, analysis: &Analysis) -> TokenStream2 {
@ -75,8 +76,9 @@ pub fn codegen(app: &App, analysis: &Analysis) -> TokenStream2 {
// For future use
// let doc = format!(" RTIC internal ({} resource): {}:{}", doc, file!(), line!());

mod_app.push(util::impl_mutex(
mod_app.push(impl_mutex(
app,
analysis,
cfgs,
true,
&shared_name,
@ -95,86 +97,6 @@ pub fn codegen(app: &App, analysis: &Analysis) -> TokenStream2 {
})
};
// Computing mapping of used interrupts to masks
let interrupt_ids = analysis.interrupts.iter().map(|(p, (id, _))| (p, id));
let mut prio_to_masks = HashMap::new();
let device = &app.args.device;
let mut uses_exceptions_with_resources = false;
let mut mask_ids = Vec::new();
for (&priority, name) in interrupt_ids.chain(app.hardware_tasks.values().flat_map(|task| {
if !util::is_exception(&task.args.binds) {
Some((&task.args.priority, &task.args.binds))
} else {
// If any resource to the exception uses non-lock-free or non-local resources this is
// not allowed on thumbv6.
uses_exceptions_with_resources = uses_exceptions_with_resources
|| task
.args
.shared_resources
.iter()
.map(|(ident, access)| {
if access.is_exclusive() {
if let Some(r) = app.shared_resources.get(ident) {
!r.properties.lock_free
} else {
false
}
} else {
false
}
})
.any(|v| v);
None
}
})) {
let v: &mut Vec<_> = prio_to_masks.entry(priority - 1).or_default();
v.push(quote!(#device::Interrupt::#name as u32));
mask_ids.push(quote!(#device::Interrupt::#name as u32));
}
// Call rtic::export::create_mask([Mask; N]), where the array is the list of shifts
let mut mask_arr = Vec::new();
// NOTE: 0..3 assumes max 4 priority levels according to M0, M23 spec
for i in 0..3 {
let v = if let Some(v) = prio_to_masks.get(&i) {
v.clone()
} else {
Vec::new()
};
mask_arr.push(quote!(
rtic::export::create_mask([#(#v),*])
));
}
// Generate a constant for the number of chunks needed by Mask.
let chunks_name = util::priority_mask_chunks_ident();
mod_app.push(quote!(
#[doc(hidden)]
#[allow(non_upper_case_globals)]
const #chunks_name: usize = rtic::export::compute_mask_chunks([#(#mask_ids),*]);
));
let masks_name = util::priority_masks_ident();
mod_app.push(quote!(
#[doc(hidden)]
#[allow(non_upper_case_globals)]
const #masks_name: [rtic::export::Mask<#chunks_name>; 3] = [#(#mask_arr),*];
));
if uses_exceptions_with_resources {
mod_app.push(quote!(
#[doc(hidden)]
#[allow(non_upper_case_globals)]
const __rtic_internal_V6_ERROR: () = rtic::export::no_basepri_panic();
));
}
quote!(
#(#mod_app)*

View file

@ -6,70 +6,11 @@ use syn::{Attribute, Ident, PatType};
const RTIC_INTERNAL: &str = "__rtic_internal";
/// Generates a `Mutex` implementation
pub fn impl_mutex(
app: &App,
cfgs: &[Attribute],
resources_prefix: bool,
name: &Ident,
ty: &TokenStream2,
ceiling: u8,
ptr: &TokenStream2,
) -> TokenStream2 {
let path = if resources_prefix {
quote!(shared_resources::#name)
} else {
quote!(#name)
};
let device = &app.args.device;
let masks_name = priority_masks_ident();
quote!(
#(#cfgs)*
impl<'a> rtic::Mutex for #path<'a> {
type T = #ty;
#[inline(always)]
fn lock<RTIC_INTERNAL_R>(&mut self, f: impl FnOnce(&mut #ty) -> RTIC_INTERNAL_R) -> RTIC_INTERNAL_R {
/// Priority ceiling
const CEILING: u8 = #ceiling;
unsafe {
rtic::export::lock(
#ptr,
CEILING,
#device::NVIC_PRIO_BITS,
&#masks_name,
f,
)
}
}
}
)
}
pub fn interrupt_ident() -> Ident {
let span = Span::call_site();
Ident::new("interrupt", span)
}
/// Whether `name` is an exception with configurable priority
pub fn is_exception(name: &Ident) -> bool {
let s = name.to_string();
matches!(
&*s,
"MemoryManagement"
| "BusFault"
| "UsageFault"
| "SecureFault"
| "SVCall"
| "DebugMonitor"
| "PendSV"
| "SysTick"
)
}
/// Mark a name as internal
pub fn mark_internal_name(name: &str) -> Ident {
Ident::new(&format!("{RTIC_INTERNAL}_{name}"), Span::call_site())
@ -204,15 +145,6 @@ pub fn static_shared_resource_ident(name: &Ident) -> Ident {
mark_internal_name(&format!("shared_resource_{name}"))
}
/// Generates an Ident for the number of 32 bit chunks used for Mask storage.
pub fn priority_mask_chunks_ident() -> Ident {
mark_internal_name("MASK_CHUNKS")
}
pub fn priority_masks_ident() -> Ident {
mark_internal_name("MASKS")
}
pub fn static_local_resource_ident(name: &Ident) -> Ident {
mark_internal_name(&format!("local_resource_{name}"))
}

View file

@ -8,7 +8,6 @@ use proc_macro::TokenStream;
use std::{env, fs, path::Path};

mod analyze;
mod bindings;
mod check;
mod codegen;
mod syntax;
@ -38,7 +37,7 @@ pub fn app(args: TokenStream, input: TokenStream) -> TokenStream {
Ok(x) => x,
};

if let Err(e) = check::app(&app) {
if let Err(e) = check::app(&app, &analysis) {
return e.to_compile_error().into();
}

View file

@ -51,7 +51,7 @@ lm3s6965 = "0.1.3"
cortex-m-semihosting = "0.5.0"
rtic-time = { path = "../rtic-time" }
rtic-channel = { path = "../rtic-channel" }
rtic-monotonics = { path = "../rtic-monotonics" }
rtic-monotonics = { path = "../rtic-monotonics", features = ["cortex_m_systick"] }

[dev-dependencies.futures]
version = "0.3.26"

View file

@ -1,89 +1,20 @@
pub use bare_metal::CriticalSection;
pub use cortex_m::{
asm::nop,
asm::wfi,
interrupt,
peripheral::{scb::SystemHandler, DWT, NVIC, SCB, SYST},
Peripherals,
};
//pub use portable_atomic as atomic;
pub use atomic_polyfill as atomic;

pub mod executor;

// #[cfg(have_basepri)]
pub mod cortex_basepri;

// #[cfg(not(have_basepri))]
pub mod cortex_source_mask;

/// Priority conversion, takes logical priorities 1..=N and converts it to NVIC priority.
#[inline]
#[must_use]
pub const fn cortex_logical2hw(logical: u8, nvic_prio_bits: u8) -> u8 {
((1 << nvic_prio_bits) - logical) << (8 - nvic_prio_bits)
}

/// Mask is used to store interrupt masks on systems without a BASEPRI register (M0, M0+, M23).
/// It needs to be large enough to cover all the relevant interrupts in use.
/// For M0/M0+ there are only 32 interrupts so we only need one u32 value.
/// For M23 there can be as many as 480 interrupts.
/// Rather than providing space for all possible interrupts, we just detect the highest interrupt in
/// use at compile time and allocate enough u32 chunks to cover them.
#[derive(Copy, Clone)]
pub struct Mask<const M: usize>([u32; M]);

impl<const M: usize> core::ops::BitOrAssign for Mask<M> {
fn bitor_assign(&mut self, rhs: Self) {
for i in 0..M {
self.0[i] |= rhs.0[i];
}
}
}

#[cfg(not(have_basepri))]
impl<const M: usize> Mask<M> {
/// Set a bit inside a Mask.
const fn set_bit(mut self, bit: u32) -> Self {
let block = bit / 32;
if block as usize >= M {
panic!("Generating masks for thumbv6/thumbv8m.base failed! Are you compiling for thumbv6 on an thumbv7 MCU or using an unsupported thumbv8m.base MCU?");
}
let offset = bit - (block * 32);
self.0[block as usize] |= 1 << offset;
self
}
}

#[cfg(have_basepri)]
use cortex_m::register::basepri;

#[cfg(have_basepri)]
#[inline(always)]
pub fn run<F>(priority: u8, f: F)
where
F: FnOnce(),
{
if priority == 1 {
// If the priority of this interrupt is `1` then BASEPRI can only be `0`
f();
unsafe { basepri::write(0) }
} else {
let initial = basepri::read();
f();
unsafe { basepri::write(initial) }
}
}

#[cfg(not(have_basepri))]
#[inline(always)]
pub fn run<F>(_priority: u8, f: F)
where
F: FnOnce(),
{
f();
}

/// Const helper to check architecture
pub const fn have_basepri() -> bool {
#[cfg(have_basepri)]
{
true
}

#[cfg(not(have_basepri))]
{
false
}
}

#[inline(always)]
@ -99,226 +30,3 @@ where
T: Sync,
{
}
/// Lock implementation using BASEPRI and global Critical Section (CS)
///
/// # Safety
///
/// The system ceiling is raised from current to ceiling
/// by either
/// - raising the BASEPRI to the ceiling value, or
/// - disable all interrupts in case we want to
/// mask interrupts with maximum priority
///
/// Dereferencing a raw pointer inside CS
///
/// The priority.set/priority.get can safely be outside the CS
/// as being a context local cell (not affected by preemptions).
/// It is merely used in order to omit masking in case current
/// priority is current priority >= ceiling.
///
/// Lock Efficiency:
/// Experiments validate (sub)-zero cost for CS implementation
/// (Sub)-zero as:
/// - Either zero OH (lock optimized out), or
/// - Amounting to an optimal assembly implementation
/// - The BASEPRI value is folded to a constant at compile time
/// - CS entry, single assembly instruction to write BASEPRI
/// - CS exit, single assembly instruction to write BASEPRI
/// - priority.set/get optimized out (their effect not)
/// - On par or better than any handwritten implementation of SRP
///
/// Limitations:
/// The current implementation reads/writes BASEPRI once
/// even in some edge cases where this may be omitted.
/// Total OH of per task is max 2 clock cycles, negligible in practice
/// but can in theory be fixed.
///
#[cfg(have_basepri)]
#[inline(always)]
pub unsafe fn lock<T, R, const M: usize>(
ptr: *mut T,
ceiling: u8,
nvic_prio_bits: u8,
_mask: &[Mask<M>; 3],
f: impl FnOnce(&mut T) -> R,
) -> R {
if ceiling == (1 << nvic_prio_bits) {
let r = interrupt::free(|_| f(&mut *ptr));
r
} else {
let current = basepri::read();
basepri::write(logical2hw(ceiling, nvic_prio_bits));
let r = f(&mut *ptr);
basepri::write(current);
r
}
}
/// Lock implementation using interrupt masking
///
/// # Safety
///
/// The system ceiling is raised from current to ceiling
/// by computing a 32 bit `mask` (1 bit per interrupt)
/// 1: ceiling >= priority > current
/// 0: else
///
/// On CS entry, `clear_enable_mask(mask)` disables interrupts
/// On CS exit, `set_enable_mask(mask)` re-enables interrupts
///
/// The priority.set/priority.get can safely be outside the CS
/// as being a context local cell (not affected by preemptions).
/// It is merely used in order to omit masking in case
/// current priority >= ceiling.
///
/// Dereferencing a raw pointer is done safely inside the CS
///
/// Lock Efficiency:
/// Early experiments validate (sub)-zero cost for CS implementation
/// (Sub)-zero as:
/// - Either zero OH (lock optimized out), or
/// - Amounting to an optimal assembly implementation
/// - if ceiling == (1 << nvic_prio_bits)
/// - we execute the closure in a global critical section (interrupt free)
/// - CS entry cost, single write to core register
/// - CS exit cost, single write to core register
/// else
/// - The `mask` value is folded to a constant at compile time
/// - CS entry, single write of the 32 bit `mask` to the `icer` register
/// - CS exit, single write of the 32 bit `mask` to the `iser` register
/// - priority.set/get optimized out (their effect not)
/// - On par or better than any hand written implementation of SRP
///
/// Limitations:
/// Current implementation does not allow for tasks with shared resources
/// to be bound to exception handlers, as these cannot be masked in HW.
///
/// Possible solutions:
/// - Mask exceptions by global critical sections (interrupt::free)
/// - Temporary lower exception priority
///
/// These possible solutions are set goals for future work
#[cfg(not(have_basepri))]
#[inline(always)]
pub unsafe fn lock<T, R, const M: usize>(
ptr: *mut T,
ceiling: u8,
_nvic_prio_bits: u8,
masks: &[Mask<M>; 3],
f: impl FnOnce(&mut T) -> R,
) -> R {
if ceiling >= 4 {
// safe to manipulate outside critical section
// execute closure under protection of raised system ceiling
// safe to manipulate outside critical section
interrupt::free(|_| f(&mut *ptr))
} else {
// safe to manipulate outside critical section
let mask = compute_mask(0, ceiling, masks);
clear_enable_mask(mask);
// execute closure under protection of raised system ceiling
let r = f(&mut *ptr);
set_enable_mask(mask);
// safe to manipulate outside critical section
r
}
}
#[cfg(not(have_basepri))]
#[inline(always)]
fn compute_mask<const M: usize>(from_prio: u8, to_prio: u8, masks: &[Mask<M>; 3]) -> Mask<M> {
let mut res = Mask([0; M]);
masks[from_prio as usize..to_prio as usize]
.iter()
.for_each(|m| res |= *m);
res
}
// enables interrupts
#[cfg(not(have_basepri))]
#[inline(always)]
unsafe fn set_enable_mask<const M: usize>(mask: Mask<M>) {
for i in 0..M {
// This check should involve compile time constants and be optimized out.
if mask.0[i] != 0 {
(*NVIC::PTR).iser[i].write(mask.0[i]);
}
}
}
// disables interrupts
#[cfg(not(have_basepri))]
#[inline(always)]
unsafe fn clear_enable_mask<const M: usize>(mask: Mask<M>) {
for i in 0..M {
// This check should involve compile time constants and be optimized out.
if mask.0[i] != 0 {
(*NVIC::PTR).icer[i].write(mask.0[i]);
}
}
}
#[inline]
#[must_use]
pub fn logical2hw(logical: u8, nvic_prio_bits: u8) -> u8 {
((1 << nvic_prio_bits) - logical) << (8 - nvic_prio_bits)
}
#[cfg(have_basepri)]
pub const fn create_mask<const N: usize, const M: usize>(_: [u32; N]) -> Mask<M> {
Mask([0; M])
}
#[cfg(not(have_basepri))]
pub const fn create_mask<const N: usize, const M: usize>(list_of_shifts: [u32; N]) -> Mask<M> {
let mut mask = Mask([0; M]);
let mut i = 0;
while i < N {
let shift = list_of_shifts[i];
i += 1;
mask = mask.set_bit(shift);
}
mask
}
#[cfg(have_basepri)]
pub const fn compute_mask_chunks<const L: usize>(_: [u32; L]) -> usize {
0
}
/// Compute the number of u32 chunks needed to store the Mask value.
/// On M0, M0+ this should always end up being 1.
/// On M23 we will pick a number that allows us to store the highest index used by the code.
/// This means the amount of overhead will vary based on the actually interrupts used by the code.
#[cfg(not(have_basepri))]
pub const fn compute_mask_chunks<const L: usize>(ids: [u32; L]) -> usize {
let mut max: usize = 0;
let mut i = 0;
while i < L {
let id = ids[i] as usize;
i += 1;
if id > max {
max = id;
}
}
(max + 32) / 32
}
#[cfg(have_basepri)]
pub const fn no_basepri_panic() {
// For non-v6 all is fine
}
#[cfg(not(have_basepri))]
pub const fn no_basepri_panic() {
panic!("Exceptions with shared resources are not allowed when compiling for thumbv6 or thumbv8m.base. Use local resources or `#[lock_free]` shared resources");
}
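
To make the `cortex_logical2hw` conversion introduced above concrete, here is a small host-side check. The 4 priority bits are an assumed value (`NVIC_PRIO_BITS` comes from the PAC and differs per device); the function body is copied so the example stands alone.

pub const fn cortex_logical2hw(logical: u8, nvic_prio_bits: u8) -> u8 {
    ((1 << nvic_prio_bits) - logical) << (8 - nvic_prio_bits)
}

fn main() {
    const NVIC_PRIO_BITS: u8 = 4; // assumed; taken from the PAC on a real device

    // Logical 1 (lowest RTIC priority) maps to 0xF0, the least urgent NVIC value.
    assert_eq!(cortex_logical2hw(1, NVIC_PRIO_BITS), 0xF0);
    // Logical 16 (highest expressible with 4 bits) maps to 0x00, the most urgent.
    assert_eq!(cortex_logical2hw(16, NVIC_PRIO_BITS), 0x00);

    println!("priority conversion checks passed");
}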

View file

@ -0,0 +1,78 @@
use super::cortex_logical2hw;
use cortex_m::register::basepri;
pub use cortex_m::{
asm::nop,
asm::wfi,
interrupt,
peripheral::{scb::SystemHandler, DWT, NVIC, SCB, SYST},
Peripherals,
};
#[inline(always)]
pub fn run<F>(priority: u8, f: F)
where
F: FnOnce(),
{
if priority == 1 {
// If the priority of this interrupt is `1` then BASEPRI can only be `0`
f();
unsafe { basepri::write(0) }
} else {
let initial = basepri::read();
f();
unsafe { basepri::write(initial) }
}
}
/// Lock implementation using BASEPRI and global Critical Section (CS)
///
/// # Safety
///
/// The system ceiling is raised from current to ceiling
/// by either
/// - raising the BASEPRI to the ceiling value, or
/// - disable all interrupts in case we want to
/// mask interrupts with maximum priority
///
/// Dereferencing a raw pointer inside CS
///
/// The priority.set/priority.get can safely be outside the CS
/// as being a context local cell (not affected by preemptions).
/// It is merely used in order to omit masking in case current
/// priority is current priority >= ceiling.
///
/// Lock Efficiency:
/// Experiments validate (sub)-zero cost for CS implementation
/// (Sub)-zero as:
/// - Either zero OH (lock optimized out), or
/// - Amounting to an optimal assembly implementation
/// - The BASEPRI value is folded to a constant at compile time
/// - CS entry, single assembly instruction to write BASEPRI
/// - CS exit, single assembly instruction to write BASEPRI
/// - priority.set/get optimized out (their effect not)
/// - On par or better than any handwritten implementation of SRP
///
/// Limitations:
/// The current implementation reads/writes BASEPRI once
/// even in some edge cases where this may be omitted.
/// Total OH of per task is max 2 clock cycles, negligible in practice
/// but can in theory be fixed.
///
#[inline(always)]
pub unsafe fn lock<T, R, const M: usize>(
ptr: *mut T,
ceiling: u8,
nvic_prio_bits: u8,
f: impl FnOnce(&mut T) -> R,
) -> R {
if ceiling == (1 << nvic_prio_bits) {
let r = interrupt::free(|_| f(&mut *ptr));
r
} else {
let current = basepri::read();
basepri::write(cortex_logical2hw(ceiling, nvic_prio_bits));
let r = f(&mut *ptr);
basepri::write(current);
r
}
}
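
As a rough illustration of the raise/run/restore protocol that `lock` implements, the host-side sketch below uses a plain variable in place of the BASEPRI register and assumes 4 priority bits; the maximum-ceiling branch that falls back to `interrupt::free` is omitted.

const NVIC_PRIO_BITS: u8 = 4; // assumed value for the sketch

const fn logical2hw(logical: u8, nvic_prio_bits: u8) -> u8 {
    ((1 << nvic_prio_bits) - logical) << (8 - nvic_prio_bits)
}

// `basepri` stands in for the hardware register; nothing is actually masked here.
fn lock<T, R>(basepri: &mut u8, data: &mut T, ceiling: u8, f: impl FnOnce(&mut T) -> R) -> R {
    let previous = *basepri;                        // remember the current "BASEPRI"
    *basepri = logical2hw(ceiling, NVIC_PRIO_BITS); // raise to the ceiling
    let r = f(data);                                // run the critical section
    *basepri = previous;                            // restore on exit
    r
}

fn main() {
    let mut basepri = 0u8; // 0 = nothing masked
    let mut counter = 0u32;
    lock(&mut basepri, &mut counter, 3, |c| *c += 1);
    assert_eq!(basepri, 0); // ceiling dropped again after the lock
    assert_eq!(counter, 1);
    println!("counter = {counter}");
}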

View file

@ -0,0 +1,194 @@
pub use cortex_m::{
asm::nop,
asm::wfi,
interrupt,
peripheral::{scb::SystemHandler, DWT, NVIC, SCB, SYST},
Peripherals,
};
/// Mask is used to store interrupt masks on systems without a BASEPRI register (M0, M0+, M23).
/// It needs to be large enough to cover all the relevant interrupts in use.
/// For M0/M0+ there are only 32 interrupts so we only need one u32 value.
/// For M23 there can be as many as 480 interrupts.
/// Rather than providing space for all possible interrupts, we just detect the highest interrupt in
/// use at compile time and allocate enough u32 chunks to cover them.
#[derive(Copy, Clone)]
pub struct Mask<const M: usize>([u32; M]);
pub const fn create_mask<const N: usize, const M: usize>(list_of_shifts: [u32; N]) -> Mask<M> {
let mut mask = Mask([0; M]);
let mut i = 0;
while i < N {
let shift = list_of_shifts[i];
i += 1;
mask = mask.set_bit(shift);
}
mask
}
/// Compute the number of u32 chunks needed to store the Mask value.
/// On M0, M0+ this should always end up being 1.
/// On M23 we will pick a number that allows us to store the highest index used by the code.
/// This means the amount of overhead will vary based on the actual interrupts used by the code.
pub const fn compute_mask_chunks<const L: usize>(ids: [u32; L]) -> usize {
let mut max: usize = 0;
let mut i = 0;
while i < L {
let id = ids[i] as usize;
i += 1;
if id > max {
max = id;
}
}
(max + 32) / 32
}
impl<const M: usize> Mask<M> {
/// Set a bit inside a Mask.
const fn set_bit(mut self, bit: u32) -> Self {
let block = bit / 32;
if block as usize >= M {
panic!("Generating masks for thumbv6/thumbv8m.base failed! Are you compiling for thumbv6 on an thumbv7 MCU or using an unsupported thumbv8m.base MCU?");
}
let offset = bit - (block * 32);
self.0[block as usize] |= 1 << offset;
self
}
}
#[inline(always)]
pub fn run<F>(_priority: u8, f: F)
where
F: FnOnce(),
{
f();
}
/// Lock implementation using interrupt masking
///
/// # Safety
///
/// The system ceiling is raised from current to ceiling
/// by computing a 32 bit `mask` (1 bit per interrupt)
/// 1: ceiling >= priority > current
/// 0: else
///
/// On CS entry, `clear_enable_mask(mask)` disables interrupts
/// On CS exit, `set_enable_mask(mask)` re-enables interrupts
///
/// The priority.set/priority.get can safely be outside the CS
/// as being a context local cell (not affected by preemptions).
/// It is merely used in order to omit masking in case
/// current priority >= ceiling.
///
/// Dereferencing a raw pointer is done safely inside the CS
///
/// Lock Efficiency:
/// Early experiments validate (sub)-zero cost for CS implementation
/// (Sub)-zero as:
/// - Either zero OH (lock optimized out), or
/// - Amounting to an optimal assembly implementation
/// - if ceiling == (1 << nvic_prio_bits)
/// - we execute the closure in a global critical section (interrupt free)
/// - CS entry cost, single write to core register
/// - CS exit cost, single write to core register
/// else
/// - The `mask` value is folded to a constant at compile time
/// - CS entry, single write of the 32 bit `mask` to the `icer` register
/// - CS exit, single write of the 32 bit `mask` to the `iser` register
/// - priority.set/get optimized out (their effect not)
/// - On par or better than any hand written implementation of SRP
///
/// Limitations:
/// Current implementation does not allow for tasks with shared resources
/// to be bound to exception handlers, as these cannot be masked in HW.
///
/// Possible solutions:
/// - Mask exceptions by global critical sections (interrupt::free)
/// - Temporarily lower the exception priority
///
/// These possible solutions are set goals for future work
#[inline(always)]
pub unsafe fn lock<T, R, const M: usize>(
ptr: *mut T,
ceiling: u8,
masks: &[Mask<M>; 3],
f: impl FnOnce(&mut T) -> R,
) -> R {
if ceiling >= 4 {
// safe to manipulate outside critical section
// execute closure under protection of raised system ceiling
// safe to manipulate outside critical section
interrupt::free(|_| f(&mut *ptr))
} else {
// safe to manipulate outside critical section
let mask = compute_mask(0, ceiling, masks);
clear_enable_mask(mask);
// execute closure under protection of raised system ceiling
let r = f(&mut *ptr);
set_enable_mask(mask);
// safe to manipulate outside critical section
r
}
}
#[inline(always)]
pub const fn compute_mask<const M: usize>(
from_prio: u8,
to_prio: u8,
masks: &[Mask<M>; 3],
) -> Mask<M> {
let mut res = Mask([0; M]);
let mut idx = from_prio as usize;
while idx < to_prio as usize {
let mut i = 0;
while i < M {
//self.0[i] |= rhs.0[i];
res.0[i] |= masks[idx].0[i];
i += 1;
}
idx += 1;
}
// masks[from_prio as usize..to_prio as usize]
// .iter()
// .for_each(|m| res |= *m);
res
}
// enables interrupts
#[inline(always)]
unsafe fn set_enable_mask<const M: usize>(mask: Mask<M>) {
for i in 0..M {
// This check should involve compile time constants and be optimized out.
if mask.0[i] != 0 {
(*NVIC::PTR).iser[i].write(mask.0[i]);
}
}
}
// disables interrupts
#[inline(always)]
unsafe fn clear_enable_mask<const M: usize>(mask: Mask<M>) {
for i in 0..M {
// This check should involve compile time constants and be optimized out.
if mask.0[i] != 0 {
(*NVIC::PTR).icer[i].write(mask.0[i]);
}
}
}
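
Finally, the chunk sizing and bit placement done by `compute_mask_chunks`, `create_mask` and `Mask::set_bit` can be exercised on the host. The sketch below re-implements just enough of them (without the panic path) and uses two hypothetical interrupt IDs, 3 and 40, so that a second u32 chunk is required.

#[derive(Copy, Clone)]
struct Mask<const M: usize>([u32; M]);

impl<const M: usize> Mask<M> {
    // Same bit placement as `set_bit` above, minus the out-of-range panic.
    const fn set_bit(mut self, bit: u32) -> Self {
        let block = (bit / 32) as usize;
        let offset = bit % 32;
        self.0[block] |= 1 << offset;
        self
    }
}

// Highest interrupt ID decides how many u32 chunks the mask needs.
const fn compute_mask_chunks<const L: usize>(ids: [u32; L]) -> usize {
    let mut max = 0;
    let mut i = 0;
    while i < L {
        if ids[i] as usize > max {
            max = ids[i] as usize;
        }
        i += 1;
    }
    (max + 32) / 32
}

const fn create_mask<const N: usize, const M: usize>(ids: [u32; N]) -> Mask<M> {
    let mut mask = Mask([0; M]);
    let mut i = 0;
    while i < N {
        mask = mask.set_bit(ids[i]);
        i += 1;
    }
    mask
}

fn main() {
    // Hypothetical interrupt IDs: ID 40 forces a second chunk.
    const IDS: [u32; 2] = [3, 40];
    const CHUNKS: usize = compute_mask_chunks(IDS);
    const MASK: Mask<CHUNKS> = create_mask(IDS);

    assert_eq!(CHUNKS, 2);
    assert_eq!(MASK.0[0], 1 << 3);         // bit 3 in the first chunk
    assert_eq!(MASK.0[1], 1 << (40 - 32)); // bit 8 in the second chunk
    println!("chunks = {CHUNKS}, mask = {:?}", MASK.0);
}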