mirror of
https://github.com/rtic-rs/rtic.git
synced 2024-11-27 14:04:56 +01:00
Support 0 prio tasks
This commit is contained in:
parent
d3d0109dd0
commit
11f0164448
9 changed files with 154 additions and 55 deletions
3
ci/expected/zero-prio-task.run
Normal file
3
ci/expected/zero-prio-task.run
Normal file
|
@ -0,0 +1,3 @@
|
|||
init
|
||||
hello from async
|
||||
hello from async2
|
56
examples/zero-prio-task.rs
Normal file
56
examples/zero-prio-task.rs
Normal file
|
@ -0,0 +1,56 @@
|
|||
#![no_main]
|
||||
#![no_std]
|
||||
#![feature(type_alias_impl_trait)]
|
||||
|
||||
use core::marker::PhantomData;
|
||||
use panic_semihosting as _;
|
||||
|
||||
pub struct NotSend {
|
||||
_0: PhantomData<*const ()>,
|
||||
}
|
||||
|
||||
#[rtic::app(device = lm3s6965, peripherals = true)]
|
||||
mod app {
|
||||
use super::NotSend;
|
||||
use core::marker::PhantomData;
|
||||
use cortex_m_semihosting::{debug, hprintln};
|
||||
|
||||
#[shared]
|
||||
struct Shared {
|
||||
x: NotSend,
|
||||
}
|
||||
|
||||
#[local]
|
||||
struct Local {
|
||||
y: NotSend,
|
||||
}
|
||||
|
||||
#[init]
|
||||
fn init(_cx: init::Context) -> (Shared, Local) {
|
||||
hprintln!("init");
|
||||
|
||||
async_task::spawn().unwrap();
|
||||
async_task2::spawn().unwrap();
|
||||
|
||||
(
|
||||
Shared {
|
||||
x: NotSend { _0: PhantomData },
|
||||
},
|
||||
Local {
|
||||
y: NotSend { _0: PhantomData },
|
||||
},
|
||||
)
|
||||
}
|
||||
|
||||
#[task(priority = 0, shared = [x], local = [y])]
|
||||
async fn async_task(_: async_task::Context) {
|
||||
hprintln!("hello from async");
|
||||
}
|
||||
|
||||
#[task(priority = 0, shared = [x])]
|
||||
async fn async_task2(_: async_task2::Context) {
|
||||
hprintln!("hello from async2");
|
||||
|
||||
debug::exit(debug::EXIT_SUCCESS); // Exit QEMU simulator
|
||||
}
|
||||
}
|
|
@ -36,6 +36,7 @@ pub fn app(analysis: analyze::Analysis, app: &App) -> Analysis {
|
|||
|
||||
let interrupts: BTreeMap<Priority, _> = priorities
|
||||
.iter()
|
||||
.filter(|prio| **prio > 0) // 0 prio tasks are run in main
|
||||
.copied()
|
||||
.rev()
|
||||
.map(|p| (p, available_interrupt.pop().expect("UNREACHABLE")))
|
||||
|
|
|
@ -32,6 +32,7 @@ pub fn app(app: &App) -> parse::Result<()> {
|
|||
first = Some(name);
|
||||
task.args.priority
|
||||
})
|
||||
.filter(|prio| *prio > 0)
|
||||
.collect::<HashSet<_>>();
|
||||
|
||||
let need = priorities.len();
|
||||
|
|
|
@ -26,9 +26,22 @@ pub fn codegen(app: &App, analysis: &Analysis) -> TokenStream2 {
|
|||
|
||||
for (&level, channel) in &analysis.channels {
|
||||
let mut stmts = vec![];
|
||||
|
||||
let dispatcher_name = if level > 0 {
|
||||
util::suffixed(&interrupts.get(&level).expect("UNREACHABLE").0.to_string())
|
||||
} else {
|
||||
util::zero_prio_dispatcher_ident()
|
||||
};
|
||||
|
||||
let pend_interrupt = if level > 0 {
|
||||
let device = &app.args.device;
|
||||
let enum_ = util::interrupt_ident();
|
||||
let interrupt = util::suffixed(&interrupts[&level].0.to_string());
|
||||
|
||||
quote!(rtic::pend(#device::#enum_::#dispatcher_name);)
|
||||
} else {
|
||||
// For 0 priority tasks we don't need to pend anything
|
||||
quote!()
|
||||
};
|
||||
|
||||
for name in channel.tasks.iter() {
|
||||
let exec_name = util::internal_task_ident(name, "EXEC");
|
||||
|
@ -60,27 +73,28 @@ pub fn codegen(app: &App, analysis: &Analysis) -> TokenStream2 {
|
|||
#executor_run_ident.store(false, core::sync::atomic::Ordering::Relaxed);
|
||||
if (&mut *#exec_name.get_mut()).poll(|| {
|
||||
#executor_run_ident.store(true, core::sync::atomic::Ordering::Release);
|
||||
rtic::pend(#device::#enum_::#interrupt);
|
||||
#pend_interrupt
|
||||
}) && #rq.load(core::sync::atomic::Ordering::Relaxed) {
|
||||
// If the ready queue is not empty and the executor finished, restart this
|
||||
// dispatch to check if the executor should be restarted.
|
||||
rtic::pend(#device::#enum_::#interrupt);
|
||||
#pend_interrupt
|
||||
}
|
||||
}
|
||||
));
|
||||
}
|
||||
|
||||
if level > 0 {
|
||||
let doc = format!(
|
||||
"Interrupt handler to dispatch async tasks at priority {}",
|
||||
level
|
||||
);
|
||||
let attribute = &interrupts[&level].1.attrs;
|
||||
let attribute = &interrupts.get(&level).expect("UNREACHABLE").1.attrs;
|
||||
items.push(quote!(
|
||||
#[allow(non_snake_case)]
|
||||
#[doc = #doc]
|
||||
#[no_mangle]
|
||||
#(#attribute)*
|
||||
unsafe fn #interrupt() {
|
||||
unsafe fn #dispatcher_name() {
|
||||
/// The priority of this interrupt handler
|
||||
const PRIORITY: u8 = #level;
|
||||
|
||||
|
@ -94,6 +108,21 @@ pub fn codegen(app: &App, analysis: &Analysis) -> TokenStream2 {
|
|||
});
|
||||
}
|
||||
));
|
||||
} else {
|
||||
items.push(quote!(
|
||||
#[allow(non_snake_case)]
|
||||
unsafe fn #dispatcher_name() -> ! {
|
||||
loop {
|
||||
// Have the acquire/release semantics outside the checks to not overdo it
|
||||
core::sync::atomic::fence(core::sync::atomic::Ordering::Acquire);
|
||||
|
||||
#(#stmts)*
|
||||
|
||||
core::sync::atomic::fence(core::sync::atomic::Ordering::Release);
|
||||
}
|
||||
}
|
||||
));
|
||||
}
|
||||
}
|
||||
|
||||
quote!(#(#items)*)
|
||||
|
|
|
@ -16,11 +16,14 @@ pub fn codegen(app: &App, analysis: &Analysis) -> TokenStream2 {
|
|||
let name = &idle.name;
|
||||
quote!(#name(#name::Context::new()))
|
||||
} else {
|
||||
// TODO: No idle defined, check for 0-priority tasks and generate an executor if needed
|
||||
|
||||
if analysis.channels.get(&0).is_some() {
|
||||
let dispatcher = util::zero_prio_dispatcher_ident();
|
||||
quote!(#dispatcher();)
|
||||
} else {
|
||||
quote!(loop {
|
||||
rtic::export::nop()
|
||||
})
|
||||
}
|
||||
};
|
||||
|
||||
let main = util::suffixed("main");
|
||||
|
|
|
@ -135,13 +135,14 @@ pub fn codegen(ctxt: Context, app: &App, analysis: &Analysis) -> TokenStream2 {
|
|||
// Store a copy of the task cfgs
|
||||
task_cfgs = cfgs.clone();
|
||||
|
||||
let pend_interrupt = if priority > 0 {
|
||||
let device = &app.args.device;
|
||||
let enum_ = util::interrupt_ident();
|
||||
let interrupt = &analysis
|
||||
.interrupts
|
||||
.get(&priority)
|
||||
.expect("RTIC-ICE: interrupt identifier not found")
|
||||
.0;
|
||||
let interrupt = &analysis.interrupts.get(&priority).expect("UREACHABLE").0;
|
||||
quote!(rtic::pend(#device::#enum_::#interrupt);)
|
||||
} else {
|
||||
quote!()
|
||||
};
|
||||
|
||||
let internal_spawn_ident = util::internal_task_ident(name, "spawn");
|
||||
|
||||
|
@ -160,7 +161,7 @@ pub fn codegen(ctxt: Context, app: &App, analysis: &Analysis) -> TokenStream2 {
|
|||
Err(())
|
||||
} else {
|
||||
#rq.store(true, core::sync::atomic::Ordering::Release);
|
||||
rtic::pend(#device::#enum_::#interrupt);
|
||||
#pend_interrupt
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
|
|
@ -187,6 +187,10 @@ pub fn need_to_lock_ident(name: &Ident) -> Ident {
|
|||
Ident::new(&format!("{}_that_needs_to_be_locked", name), name.span())
|
||||
}
|
||||
|
||||
pub fn zero_prio_dispatcher_ident() -> Ident {
|
||||
Ident::new("__rtic_internal_async_0_prio_dispatcher", Span::call_site())
|
||||
}
|
||||
|
||||
/// The name to get better RT flag errors
|
||||
pub fn rt_err_ident() -> Ident {
|
||||
Ident::new(
|
||||
|
|
|
@ -248,35 +248,36 @@ pub(crate) fn app(app: &App) -> Result<Analysis, syn::Error> {
|
|||
}
|
||||
}
|
||||
|
||||
// Most shared resources need to be `Send`
|
||||
// Most shared resources need to be `Send`, only 0 prio does not need it
|
||||
let mut send_types = SendTypes::new();
|
||||
let owned_by_idle = Ownership::Owned { priority: 0 };
|
||||
|
||||
for (name, res) in app.shared_resources.iter() {
|
||||
// Handle not owned by idle
|
||||
if ownerships
|
||||
.get(name)
|
||||
.map(|ownership| *ownership != owned_by_idle)
|
||||
.map(|ownership| match *ownership {
|
||||
Ownership::Owned { priority: ceiling }
|
||||
| Ownership::CoOwned { priority: ceiling }
|
||||
| Ownership::Contended { ceiling } => ceiling != 0,
|
||||
})
|
||||
.unwrap_or(false)
|
||||
{
|
||||
send_types.insert(res.ty.clone());
|
||||
}
|
||||
}
|
||||
|
||||
// Most local resources need to be `Send` as well
|
||||
// Most local resources need to be `Send` as well, only 0 prio does not need it
|
||||
for (name, res) in app.local_resources.iter() {
|
||||
if let Some(idle) = &app.idle {
|
||||
// Only Send if not in idle or not at idle prio
|
||||
if idle.args.local_resources.get(name).is_none()
|
||||
&& !ownerships
|
||||
if ownerships
|
||||
.get(name)
|
||||
.map(|ownership| *ownership != owned_by_idle)
|
||||
.map(|ownership| match *ownership {
|
||||
Ownership::Owned { priority: ceiling }
|
||||
| Ownership::CoOwned { priority: ceiling }
|
||||
| Ownership::Contended { ceiling } => ceiling != 0,
|
||||
})
|
||||
.unwrap_or(false)
|
||||
{
|
||||
send_types.insert(res.ty.clone());
|
||||
}
|
||||
} else {
|
||||
send_types.insert(res.ty.clone());
|
||||
}
|
||||
}
|
||||
|
||||
let mut channels = Channels::new();
|
||||
|
|
Loading…
Reference in a new issue