Support 0 prio tasks

This commit is contained in:
Emil Fresk 2023-01-08 21:10:06 +01:00 committed by Henrik Tjäder
parent d3d0109dd0
commit 11f0164448
9 changed files with 154 additions and 55 deletions

View file

@ -0,0 +1,3 @@
init
hello from async
hello from async2

View file

@ -0,0 +1,56 @@
#![no_main]
#![no_std]
#![feature(type_alias_impl_trait)]
use core::marker::PhantomData;
use panic_semihosting as _;
/// Marker type that is deliberately `!Send`.
///
/// `PhantomData<*const ()>` embeds a raw-pointer type, and raw pointers are
/// not `Send`, so the auto-trait is suppressed for this struct. The example
/// uses it to prove that 0-priority tasks may hold non-`Send` resources.
pub struct NotSend {
    // Zero-sized; exists only to opt out of `Send`.
    _0: PhantomData<*const ()>,
}
/// Example app showing that tasks at priority 0 can own non-`Send`
/// resources: per this commit, 0-priority tasks are run from `main`
/// (not from an interrupt dispatcher), so they all execute in one
/// context and `Send` is not required for data shared only among them.
#[rtic::app(device = lm3s6965, peripherals = true)]
mod app {
    use super::NotSend;
    use core::marker::PhantomData;
    use cortex_m_semihosting::{debug, hprintln};

    /// Shared resources; `x` is `!Send` on purpose.
    #[shared]
    struct Shared {
        x: NotSend,
    }

    /// Task-local resources; `y` is `!Send` on purpose.
    #[local]
    struct Local {
        y: NotSend,
    }

    /// Spawns both async tasks, then hands out the non-`Send` resources.
    #[init]
    fn init(_cx: init::Context) -> (Shared, Local) {
        hprintln!("init");

        async_task::spawn().unwrap();
        async_task2::spawn().unwrap();

        (
            Shared {
                x: NotSend { _0: PhantomData },
            },
            Local {
                y: NotSend { _0: PhantomData },
            },
        )
    }

    /// 0-priority async task holding both a shared and a local `!Send` resource.
    #[task(priority = 0, shared = [x], local = [y])]
    async fn async_task(_: async_task::Context) {
        hprintln!("hello from async");
    }

    /// Second 0-priority async task; terminates the QEMU run when it executes.
    #[task(priority = 0, shared = [x])]
    async fn async_task2(_: async_task2::Context) {
        hprintln!("hello from async2");
        debug::exit(debug::EXIT_SUCCESS); // Exit QEMU simulator
    }
}

View file

@ -36,6 +36,7 @@ pub fn app(analysis: analyze::Analysis, app: &App) -> Analysis {
let interrupts: BTreeMap<Priority, _> = priorities let interrupts: BTreeMap<Priority, _> = priorities
.iter() .iter()
.filter(|prio| **prio > 0) // 0 prio tasks are run in main
.copied() .copied()
.rev() .rev()
.map(|p| (p, available_interrupt.pop().expect("UNREACHABLE"))) .map(|p| (p, available_interrupt.pop().expect("UNREACHABLE")))

View file

@ -32,6 +32,7 @@ pub fn app(app: &App) -> parse::Result<()> {
first = Some(name); first = Some(name);
task.args.priority task.args.priority
}) })
.filter(|prio| *prio > 0)
.collect::<HashSet<_>>(); .collect::<HashSet<_>>();
let need = priorities.len(); let need = priorities.len();

View file

@ -26,9 +26,22 @@ pub fn codegen(app: &App, analysis: &Analysis) -> TokenStream2 {
for (&level, channel) in &analysis.channels { for (&level, channel) in &analysis.channels {
let mut stmts = vec![]; let mut stmts = vec![];
let dispatcher_name = if level > 0 {
util::suffixed(&interrupts.get(&level).expect("UNREACHABLE").0.to_string())
} else {
util::zero_prio_dispatcher_ident()
};
let pend_interrupt = if level > 0 {
let device = &app.args.device; let device = &app.args.device;
let enum_ = util::interrupt_ident(); let enum_ = util::interrupt_ident();
let interrupt = util::suffixed(&interrupts[&level].0.to_string());
quote!(rtic::pend(#device::#enum_::#dispatcher_name);)
} else {
// For 0 priority tasks we don't need to pend anything
quote!()
};
for name in channel.tasks.iter() { for name in channel.tasks.iter() {
let exec_name = util::internal_task_ident(name, "EXEC"); let exec_name = util::internal_task_ident(name, "EXEC");
@ -60,27 +73,28 @@ pub fn codegen(app: &App, analysis: &Analysis) -> TokenStream2 {
#executor_run_ident.store(false, core::sync::atomic::Ordering::Relaxed); #executor_run_ident.store(false, core::sync::atomic::Ordering::Relaxed);
if (&mut *#exec_name.get_mut()).poll(|| { if (&mut *#exec_name.get_mut()).poll(|| {
#executor_run_ident.store(true, core::sync::atomic::Ordering::Release); #executor_run_ident.store(true, core::sync::atomic::Ordering::Release);
rtic::pend(#device::#enum_::#interrupt); #pend_interrupt
}) && #rq.load(core::sync::atomic::Ordering::Relaxed) { }) && #rq.load(core::sync::atomic::Ordering::Relaxed) {
// If the ready queue is not empty and the executor finished, restart this // If the ready queue is not empty and the executor finished, restart this
// dispatch to check if the executor should be restarted. // dispatch to check if the executor should be restarted.
rtic::pend(#device::#enum_::#interrupt); #pend_interrupt
} }
} }
)); ));
} }
if level > 0 {
let doc = format!( let doc = format!(
"Interrupt handler to dispatch async tasks at priority {}", "Interrupt handler to dispatch async tasks at priority {}",
level level
); );
let attribute = &interrupts[&level].1.attrs; let attribute = &interrupts.get(&level).expect("UNREACHABLE").1.attrs;
items.push(quote!( items.push(quote!(
#[allow(non_snake_case)] #[allow(non_snake_case)]
#[doc = #doc] #[doc = #doc]
#[no_mangle] #[no_mangle]
#(#attribute)* #(#attribute)*
unsafe fn #interrupt() { unsafe fn #dispatcher_name() {
/// The priority of this interrupt handler /// The priority of this interrupt handler
const PRIORITY: u8 = #level; const PRIORITY: u8 = #level;
@ -94,6 +108,21 @@ pub fn codegen(app: &App, analysis: &Analysis) -> TokenStream2 {
}); });
} }
)); ));
} else {
items.push(quote!(
#[allow(non_snake_case)]
unsafe fn #dispatcher_name() -> ! {
loop {
// Have the acquire/release semantics outside the checks to not overdo it
core::sync::atomic::fence(core::sync::atomic::Ordering::Acquire);
#(#stmts)*
core::sync::atomic::fence(core::sync::atomic::Ordering::Release);
}
}
));
}
} }
quote!(#(#items)*) quote!(#(#items)*)

View file

@ -16,11 +16,14 @@ pub fn codegen(app: &App, analysis: &Analysis) -> TokenStream2 {
let name = &idle.name; let name = &idle.name;
quote!(#name(#name::Context::new())) quote!(#name(#name::Context::new()))
} else { } else {
// TODO: No idle defined, check for 0-priority tasks and generate an executor if needed if analysis.channels.get(&0).is_some() {
let dispatcher = util::zero_prio_dispatcher_ident();
quote!(#dispatcher();)
} else {
quote!(loop { quote!(loop {
rtic::export::nop() rtic::export::nop()
}) })
}
}; };
let main = util::suffixed("main"); let main = util::suffixed("main");

View file

@ -135,13 +135,14 @@ pub fn codegen(ctxt: Context, app: &App, analysis: &Analysis) -> TokenStream2 {
// Store a copy of the task cfgs // Store a copy of the task cfgs
task_cfgs = cfgs.clone(); task_cfgs = cfgs.clone();
let pend_interrupt = if priority > 0 {
let device = &app.args.device; let device = &app.args.device;
let enum_ = util::interrupt_ident(); let enum_ = util::interrupt_ident();
let interrupt = &analysis let interrupt = &analysis.interrupts.get(&priority).expect("UNREACHABLE").0;
.interrupts quote!(rtic::pend(#device::#enum_::#interrupt);)
.get(&priority) } else {
.expect("RTIC-ICE: interrupt identifier not found") quote!()
.0; };
let internal_spawn_ident = util::internal_task_ident(name, "spawn"); let internal_spawn_ident = util::internal_task_ident(name, "spawn");
@ -160,7 +161,7 @@ pub fn codegen(ctxt: Context, app: &App, analysis: &Analysis) -> TokenStream2 {
Err(()) Err(())
} else { } else {
#rq.store(true, core::sync::atomic::Ordering::Release); #rq.store(true, core::sync::atomic::Ordering::Release);
rtic::pend(#device::#enum_::#interrupt); #pend_interrupt
Ok(()) Ok(())
} }
} }

View file

@ -187,6 +187,10 @@ pub fn need_to_lock_ident(name: &Ident) -> Ident {
Ident::new(&format!("{}_that_needs_to_be_locked", name), name.span()) Ident::new(&format!("{}_that_needs_to_be_locked", name), name.span())
} }
pub fn zero_prio_dispatcher_ident() -> Ident {
Ident::new("__rtic_internal_async_0_prio_dispatcher", Span::call_site())
}
/// The name to get better RT flag errors /// The name to get better RT flag errors
pub fn rt_err_ident() -> Ident { pub fn rt_err_ident() -> Ident {
Ident::new( Ident::new(

View file

@ -248,35 +248,36 @@ pub(crate) fn app(app: &App) -> Result<Analysis, syn::Error> {
} }
} }
// Most shared resources need to be `Send` // Most shared resources need to be `Send`, only 0 prio does not need it
let mut send_types = SendTypes::new(); let mut send_types = SendTypes::new();
let owned_by_idle = Ownership::Owned { priority: 0 };
for (name, res) in app.shared_resources.iter() { for (name, res) in app.shared_resources.iter() {
// Handle not owned by idle
if ownerships if ownerships
.get(name) .get(name)
.map(|ownership| *ownership != owned_by_idle) .map(|ownership| match *ownership {
Ownership::Owned { priority: ceiling }
| Ownership::CoOwned { priority: ceiling }
| Ownership::Contended { ceiling } => ceiling != 0,
})
.unwrap_or(false) .unwrap_or(false)
{ {
send_types.insert(res.ty.clone()); send_types.insert(res.ty.clone());
} }
} }
// Most local resources need to be `Send` as well // Most local resources need to be `Send` as well, only 0 prio does not need it
for (name, res) in app.local_resources.iter() { for (name, res) in app.local_resources.iter() {
if let Some(idle) = &app.idle { if ownerships
// Only Send if not in idle or not at idle prio
if idle.args.local_resources.get(name).is_none()
&& !ownerships
.get(name) .get(name)
.map(|ownership| *ownership != owned_by_idle) .map(|ownership| match *ownership {
Ownership::Owned { priority: ceiling }
| Ownership::CoOwned { priority: ceiling }
| Ownership::Contended { ceiling } => ceiling != 0,
})
.unwrap_or(false) .unwrap_or(false)
{ {
send_types.insert(res.ty.clone()); send_types.insert(res.ty.clone());
} }
} else {
send_types.insert(res.ty.clone());
}
} }
let mut channels = Channels::new(); let mut channels = Channels::new();