From 11f01644484700bc3b21ad7523ff03f2ae250655 Mon Sep 17 00:00:00 2001
From: Emil Fresk
Date: Sun, 8 Jan 2023 21:10:06 +0100
Subject: [PATCH] Support 0 prio tasks

---
 ci/expected/zero-prio-task.run          |  3 +
 examples/zero-prio-task.rs              | 56 +++++++++++++++++
 macros/src/analyze.rs                   |  1 +
 macros/src/check.rs                     |  1 +
 macros/src/codegen/async_dispatchers.rs | 81 +++++++++++++++++--------
 macros/src/codegen/main.rs              | 13 ++--
 macros/src/codegen/module.rs            | 17 +++---
 macros/src/codegen/util.rs              |  4 ++
 macros/src/syntax/analyze.rs            | 33 +++++-----
 9 files changed, 154 insertions(+), 55 deletions(-)
 create mode 100644 ci/expected/zero-prio-task.run
 create mode 100644 examples/zero-prio-task.rs

diff --git a/ci/expected/zero-prio-task.run b/ci/expected/zero-prio-task.run
new file mode 100644
index 0000000000..123b0f2687
--- /dev/null
+++ b/ci/expected/zero-prio-task.run
@@ -0,0 +1,3 @@
+init
+hello from async
+hello from async2
diff --git a/examples/zero-prio-task.rs b/examples/zero-prio-task.rs
new file mode 100644
index 0000000000..fc385092c4
--- /dev/null
+++ b/examples/zero-prio-task.rs
@@ -0,0 +1,56 @@
+#![no_main]
+#![no_std]
+#![feature(type_alias_impl_trait)]
+
+use core::marker::PhantomData;
+use panic_semihosting as _;
+
+pub struct NotSend {
+    _0: PhantomData<*const ()>,
+}
+
+#[rtic::app(device = lm3s6965, peripherals = true)]
+mod app {
+    use super::NotSend;
+    use core::marker::PhantomData;
+    use cortex_m_semihosting::{debug, hprintln};
+
+    #[shared]
+    struct Shared {
+        x: NotSend,
+    }
+
+    #[local]
+    struct Local {
+        y: NotSend,
+    }
+
+    #[init]
+    fn init(_cx: init::Context) -> (Shared, Local) {
+        hprintln!("init");
+
+        async_task::spawn().unwrap();
+        async_task2::spawn().unwrap();
+
+        (
+            Shared {
+                x: NotSend { _0: PhantomData },
+            },
+            Local {
+                y: NotSend { _0: PhantomData },
+            },
+        )
+    }
+
+    #[task(priority = 0, shared = [x], local = [y])]
+    async fn async_task(_: async_task::Context) {
+        hprintln!("hello from async");
+    }
+
+    #[task(priority = 0, shared = [x])]
+    async fn async_task2(_: async_task2::Context) {
+        hprintln!("hello from async2");
+
+        debug::exit(debug::EXIT_SUCCESS); // Exit QEMU simulator
+    }
+}
diff --git a/macros/src/analyze.rs b/macros/src/analyze.rs
index cb42ad6f2a..65774f6c4d 100644
--- a/macros/src/analyze.rs
+++ b/macros/src/analyze.rs
@@ -36,6 +36,7 @@ pub fn app(analysis: analyze::Analysis, app: &App) -> Analysis {
 
     let interrupts: BTreeMap<Priority, _> = priorities
         .iter()
+        .filter(|prio| **prio > 0) // 0 prio tasks are run in main
         .copied()
         .rev()
         .map(|p| (p, available_interrupt.pop().expect("UNREACHABLE")))
diff --git a/macros/src/check.rs b/macros/src/check.rs
index 312b84d5f0..72d0a27024 100644
--- a/macros/src/check.rs
+++ b/macros/src/check.rs
@@ -32,6 +32,7 @@ pub fn app(app: &App) -> parse::Result<()> {
             first = Some(name);
             task.args.priority
         })
+        .filter(|prio| *prio > 0)
         .collect::<HashSet<_>>();
 
     let need = priorities.len();
diff --git a/macros/src/codegen/async_dispatchers.rs b/macros/src/codegen/async_dispatchers.rs
index 62b17fee4a..f6408e1edf 100644
--- a/macros/src/codegen/async_dispatchers.rs
+++ b/macros/src/codegen/async_dispatchers.rs
@@ -26,9 +26,22 @@ pub fn codegen(app: &App, analysis: &Analysis) -> TokenStream2 {
 
     for (&level, channel) in &analysis.channels {
         let mut stmts = vec![];
-        let device = &app.args.device;
-        let enum_ = util::interrupt_ident();
-        let interrupt = util::suffixed(&interrupts[&level].0.to_string());
+
+        let dispatcher_name = if level > 0 {
+            util::suffixed(&interrupts.get(&level).expect("UNREACHABLE").0.to_string())
+        } else {
+            util::zero_prio_dispatcher_ident()
+        };
+
+        let pend_interrupt = if level > 0 {
+            let device = &app.args.device;
+            let enum_ = util::interrupt_ident();
+
+            quote!(rtic::pend(#device::#enum_::#dispatcher_name);)
+        } else {
+            // For 0 priority tasks we don't need to pend anything
+            quote!()
+        };
 
         for name in channel.tasks.iter() {
             let exec_name = util::internal_task_ident(name, "EXEC");
@@ -60,40 +73,56 @@ pub fn codegen(app: &App, analysis: &Analysis) -> TokenStream2 {
                     #executor_run_ident.store(false, core::sync::atomic::Ordering::Relaxed);
                     if (&mut *#exec_name.get_mut()).poll(|| {
                         #executor_run_ident.store(true, core::sync::atomic::Ordering::Release);
-                        rtic::pend(#device::#enum_::#interrupt);
+                        #pend_interrupt
                     }) && #rq.load(core::sync::atomic::Ordering::Relaxed) {
                         // If the ready queue is not empty and the executor finished, restart this
                         // dispatch to check if the executor should be restarted.
-                        rtic::pend(#device::#enum_::#interrupt);
+                        #pend_interrupt
                     }
                 }
             ));
         }
 
-        let doc = format!(
-            "Interrupt handler to dispatch async tasks at priority {}",
-            level
-        );
-        let attribute = &interrupts[&level].1.attrs;
-        items.push(quote!(
-            #[allow(non_snake_case)]
-            #[doc = #doc]
-            #[no_mangle]
-            #(#attribute)*
-            unsafe fn #interrupt() {
-                /// The priority of this interrupt handler
-                const PRIORITY: u8 = #level;
+        if level > 0 {
+            let doc = format!(
+                "Interrupt handler to dispatch async tasks at priority {}",
+                level
+            );
+            let attribute = &interrupts.get(&level).expect("UNREACHABLE").1.attrs;
+            items.push(quote!(
+                #[allow(non_snake_case)]
+                #[doc = #doc]
+                #[no_mangle]
+                #(#attribute)*
+                unsafe fn #dispatcher_name() {
+                    /// The priority of this interrupt handler
+                    const PRIORITY: u8 = #level;
 
-                rtic::export::run(PRIORITY, || {
-                    // Have the acquire/release semantics outside the checks to no overdo it
-                    core::sync::atomic::fence(core::sync::atomic::Ordering::Acquire);
+                    rtic::export::run(PRIORITY, || {
+                        // Have the acquire/release semantics outside the checks to no overdo it
+                        core::sync::atomic::fence(core::sync::atomic::Ordering::Acquire);
 
-                    #(#stmts)*
+                        #(#stmts)*
 
-                    core::sync::atomic::fence(core::sync::atomic::Ordering::Release);
-                });
-            }
-        ));
+                        core::sync::atomic::fence(core::sync::atomic::Ordering::Release);
+                    });
+                }
+            ));
+        } else {
+            items.push(quote!(
+                #[allow(non_snake_case)]
+                unsafe fn #dispatcher_name() -> ! {
+                    loop {
+                        // Have the acquire/release semantics outside the checks to no overdo it
+                        core::sync::atomic::fence(core::sync::atomic::Ordering::Acquire);
+
+                        #(#stmts)*
+
+                        core::sync::atomic::fence(core::sync::atomic::Ordering::Release);
+                    }
+                }
+            ));
+        }
     }
 
     quote!(#(#items)*)
diff --git a/macros/src/codegen/main.rs b/macros/src/codegen/main.rs
index 90f09ae0d8..8e7138f438 100644
--- a/macros/src/codegen/main.rs
+++ b/macros/src/codegen/main.rs
@@ -16,11 +16,14 @@ pub fn codegen(app: &App, analysis: &Analysis) -> TokenStream2 {
         let name = &idle.name;
         quote!(#name(#name::Context::new()))
     } else {
-        // TODO: No idle defined, check for 0-priority tasks and generate an executor if needed
-
-        quote!(loop {
-            rtic::export::nop()
-        })
+        if analysis.channels.get(&0).is_some() {
+            let dispatcher = util::zero_prio_dispatcher_ident();
+            quote!(#dispatcher();)
+        } else {
+            quote!(loop {
+                rtic::export::nop()
+            })
+        }
     };
 
     let main = util::suffixed("main");
diff --git a/macros/src/codegen/module.rs b/macros/src/codegen/module.rs
index c6f7690fc3..70fbb5e651 100644
--- a/macros/src/codegen/module.rs
+++ b/macros/src/codegen/module.rs
@@ -135,13 +135,14 @@ pub fn codegen(ctxt: Context, app: &App, analysis: &Analysis) -> TokenStream2 {
 
             // Store a copy of the task cfgs
            task_cfgs = cfgs.clone();
 
-            let device = &app.args.device;
-            let enum_ = util::interrupt_ident();
-            let interrupt = &analysis
-                .interrupts
-                .get(&priority)
-                .expect("RTIC-ICE: interrupt identifier not found")
-                .0;
+            let pend_interrupt = if priority > 0 {
+                let device = &app.args.device;
+                let enum_ = util::interrupt_ident();
+                let interrupt = &analysis.interrupts.get(&priority).expect("UREACHABLE").0;
+                quote!(rtic::pend(#device::#enum_::#interrupt);)
+            } else {
+                quote!()
+            };
 
             let internal_spawn_ident = util::internal_task_ident(name, "spawn");
@@ -160,7 +161,7 @@ pub fn codegen(ctxt: Context, app: &App, analysis: &Analysis) -> TokenStream2 {
                         Err(())
                     } else {
                         #rq.store(true, core::sync::atomic::Ordering::Release);
-                        rtic::pend(#device::#enum_::#interrupt);
+                        #pend_interrupt
                         Ok(())
                     }
                 }
diff --git a/macros/src/codegen/util.rs b/macros/src/codegen/util.rs
index a071ca279d..6552839f76 100644
--- a/macros/src/codegen/util.rs
+++ b/macros/src/codegen/util.rs
@@ -187,6 +187,10 @@ pub fn need_to_lock_ident(name: &Ident) -> Ident {
     Ident::new(&format!("{}_that_needs_to_be_locked", name), name.span())
 }
 
+pub fn zero_prio_dispatcher_ident() -> Ident {
+    Ident::new("__rtic_internal_async_0_prio_dispatcher", Span::call_site())
+}
+
 /// The name to get better RT flag errors
 pub fn rt_err_ident() -> Ident {
     Ident::new(
diff --git a/macros/src/syntax/analyze.rs b/macros/src/syntax/analyze.rs
index dd5a9b40d2..b70ceb8b38 100644
--- a/macros/src/syntax/analyze.rs
+++ b/macros/src/syntax/analyze.rs
@@ -248,33 +248,34 @@ pub(crate) fn app(app: &App) -> Result<Analysis> {
         }
     }
 
-    // Most shared resources need to be `Send`
+    // Most shared resources need to be `Send`, only 0 prio does not need it
     let mut send_types = SendTypes::new();
-    let owned_by_idle = Ownership::Owned { priority: 0 };
+
     for (name, res) in app.shared_resources.iter() {
-        // Handle not owned by idle
         if ownerships
             .get(name)
-            .map(|ownership| *ownership != owned_by_idle)
+            .map(|ownership| match *ownership {
+                Ownership::Owned { priority: ceiling }
+                | Ownership::CoOwned { priority: ceiling }
+                | Ownership::Contended { ceiling } => ceiling != 0,
+            })
             .unwrap_or(false)
         {
             send_types.insert(res.ty.clone());
         }
     }
 
-    // Most local resources need to be `Send` as well
+    // Most local resources need to be `Send` as well, only 0 prio does not need it
     for (name, res) in app.local_resources.iter() {
-        if let Some(idle) = &app.idle {
-            // Only Send if not in idle or not at idle prio
-            if idle.args.local_resources.get(name).is_none()
-                && !ownerships
-                    .get(name)
-                    .map(|ownership| *ownership != owned_by_idle)
-                    .unwrap_or(false)
-            {
-                send_types.insert(res.ty.clone());
-            }
-        } else {
+        if ownerships
+            .get(name)
+            .map(|ownership| match *ownership {
+                Ownership::Owned { priority: ceiling }
+                | Ownership::CoOwned { priority: ceiling }
+                | Ownership::Contended { ceiling } => ceiling != 0,
+            })
+            .unwrap_or(false)
+        {
             send_types.insert(res.ty.clone());
         }
     }
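
Note on the dispatch model introduced by this patch: `priority = 0` software tasks are no longer driven from an interrupt handler. When no `#[idle]` is defined, the generated `main` calls `__rtic_internal_async_0_prio_dispatcher()`, a never-returning loop that polls the zero-priority executors directly and never issues `rtic::pend`; this matches the relaxed `Send` checks in macros/src/syntax/analyze.rs, which now only require `Send` for resources whose ceiling is non-zero. The sketch below is a simplified, host-runnable model of that polling pattern only; it is not the actual macro expansion, and the `TASK_READY` flag and `poll_task_executor` names are illustrative stand-ins.

    use std::sync::atomic::{fence, AtomicBool, Ordering};

    // One ready flag per zero-priority task; the real macro generates one flag and
    // one executor per software task.
    static TASK_READY: AtomicBool = AtomicBool::new(false);

    // Stand-in for polling the task's async executor; returns true when the future completes.
    fn poll_task_executor() -> bool {
        println!("hello from async");
        true
    }

    fn main() {
        // Spawning a zero-priority task only marks it ready; nothing is pended.
        TASK_READY.store(true, Ordering::Release);

        // Model of the generated dispatcher loop. The real function is `fn ... -> !` and
        // spins forever in place of `idle`; this sketch exits once the task finishes so
        // that it terminates on a host.
        loop {
            // Acquire/release fences sit outside the per-task checks, as in the generated code.
            fence(Ordering::Acquire);

            let mut done = false;
            if TASK_READY.swap(false, Ordering::Relaxed) {
                done = poll_task_executor();
            }

            fence(Ordering::Release);

            if done {
                break;
            }
        }
    }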