From d665ea95b380f99c1fc040fa53dea5a4ded90063 Mon Sep 17 00:00:00 2001
From: Jorge Aparicio <jorge@japaric.io>
Date: Mon, 14 May 2018 21:22:50 +0200
Subject: [PATCH] WIP

---
 .gitignore                                    |   1 +
 Cargo.toml                                    |  20 +-
 book/book.toml                                |   5 +
 book/src/SUMMARY.md                           |  24 +
 book/src/internals.md                         |   7 +
 book/src/internals/capacity.md                |  48 ++
 book/src/internals/ceilings.md                |   1 +
 book/src/internals/claim.md                   | 154 ++++
 book/src/internals/dispatcher.md              | 124 ++++
 book/src/internals/locks.md                   |   1 +
 book/src/internals/messages.md                |   3 +
 book/src/internals/schedule-now.md            |  47 ++
 book/src/internals/scheduler.md               |  40 ++
 book/src/internals/tq.md                      |   8 +
 book/src/internals/tq/handler.md              |  87 +++
 book/src/internals/tq/schedule-after.md       | 115 +++
 book/src/preface.md                           |   4 +
 book/src/user/basic.md                        |  80 +++
 book/src/user/events.md                       |  65 ++
 book/src/user/guide.md                        |   4 +
 book/src/user/late-resources.md               |  49 ++
 book/src/user/messages.md                     | 148 ++++
 book/src/user/periodic.md                     |  91 +++
 book/src/user/pools.md                        | 127 ++++
 book/src/user/scheduling.md                   | 126 ++++
 book/src/user/sharing.md                      | 194 +++++
 book/src/user/state.md                        |  48 ++
 ci/script.sh                                  |  40 +-
 examples/empty.rs                             |  20 +-
 examples/{interrupt.rs => event-task.rs}      |  18 +
 examples/periodic-payload.rs                  |  54 +-
 examples/periodic-preemption-payload.rs       | 130 ++--
 examples/periodic-preemption.rs               |  46 +-
 examples/periodic.rs                          |  49 +-
 ...r.rs => schedule-after-from-event-task.rs} |  34 +-
 .../{async-after.rs => schedule-after.rs}     |  20 +-
 ...ync.rs => schedule-now-from-event-task.rs} |  34 +-
 examples/{async.rs => schedule-now.rs}        |  28 +-
 examples/user-struct.rs                       |  22 +-
 macros/Cargo.toml                             |   2 +-
 macros/src/analyze.rs                         |  63 +-
 macros/src/check.rs                           |   8 +-
 macros/src/trans.rs                           | 677 ++++++++++--------
 src/{ => _impl}/instant.rs                    |  14 +-
 src/_impl/mod.rs                              |  68 ++
 src/{ => _impl}/tq.rs                         |  53 +-
 src/lib.rs                                    | 107 +--
 src/resource.rs                               |  62 +-
 tests/cfail.rs                                |  12 +-
 tests/cfail/critical-section.rs               |   9 +-
 tests/cfail/duplicated-task.rs                |  29 -
 tests/cfail/exception.rs                      |   1 +
 tests/cfail/idle.rs                           |   1 +
 tests/cfail/init-resource-share-idle.rs       |   1 +
 tests/cfail/init-resource-share-task.rs       |   1 +
 tests/cfail/init.rs                           |   1 +
 tests/cfail/interrupt.rs                      |   1 +
 tests/cfail/late-resource-init.rs             |   1 +
 tests/cfail/lock.rs                           |   9 +-
 tests/cfail/priority-too-high.rs              |   1 +
 tests/cfail/priority-too-low.rs               |   1 +
 tests/cfail/resource-alias.rs                 |   1 +
 tests/cfail/resource-not-send-sync.rs         |   1 +
 tests/cfail/token-outlive.rs                  |   5 +-
 tests/cfail/token-transfer.rs                 |   9 +-
 tests/cfail/wrong-threshold.rs                |  11 +-
 66 files changed, 2556 insertions(+), 709 deletions(-)
 create mode 100644 book/book.toml
 create mode 100644 book/src/SUMMARY.md
 create mode 100644 book/src/internals.md
 create mode 100644 book/src/internals/capacity.md
 create mode 100644 book/src/internals/ceilings.md
 create mode 100644 book/src/internals/claim.md
 create mode 100644 book/src/internals/dispatcher.md
 create mode 100644 book/src/internals/locks.md
 create mode 100644 book/src/internals/messages.md
 create mode 100644 book/src/internals/schedule-now.md
 create mode 100644 book/src/internals/scheduler.md
 create mode 100644 book/src/internals/tq.md
 create mode 100644 book/src/internals/tq/handler.md
 create mode 100644 book/src/internals/tq/schedule-after.md
 create mode 100644 book/src/preface.md
 create mode 100644 book/src/user/basic.md
 create mode 100644 book/src/user/events.md
 create mode 100644 book/src/user/guide.md
 create mode 100644 book/src/user/late-resources.md
 create mode 100644 book/src/user/messages.md
 create mode 100644 book/src/user/periodic.md
 create mode 100644 book/src/user/pools.md
 create mode 100644 book/src/user/scheduling.md
 create mode 100644 book/src/user/sharing.md
 create mode 100644 book/src/user/state.md
 rename examples/{interrupt.rs => event-task.rs} (61%)
 rename examples/{interrupt-async-after.rs => schedule-after-from-event-task.rs} (62%)
 rename examples/{async-after.rs => schedule-after.rs} (62%)
 rename examples/{interrupt-async.rs => schedule-now-from-event-task.rs} (62%)
 rename examples/{async.rs => schedule-now.rs} (54%)
 rename src/{ => _impl}/instant.rs (77%)
 create mode 100644 src/_impl/mod.rs
 rename src/{ => _impl}/tq.rs (62%)
 delete mode 100644 tests/cfail/duplicated-task.rs

diff --git a/.gitignore b/.gitignore
index 29204d6567..a5858178c4 100644
--- a/.gitignore
+++ b/.gitignore
@@ -2,5 +2,6 @@
 *.org
 .#*
 .gdb_history
+/book/book
 Cargo.lock
 target/
diff --git a/Cargo.toml b/Cargo.toml
index cea52dd77e..006998b345 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -10,27 +10,27 @@ keywords = ["arm", "cortex-m"]
 license = "MIT OR Apache-2.0"
 name = "cortex-m-rtfm"
 repository = "https://github.com/japaric/cortex-m-rtfm"
-version = "0.3.2"
+version = "0.4.0"
 
 [[example]]
-name = "async-after"
+name = "schedule-after"
 required-features = ["timer-queue"]
 
 [[example]]
-name = "async"
+name = "schedule-now"
 
 [[example]]
 name = "empty"
 
 [[example]]
-name = "interrupt"
+name = "event-task"
 
 [[example]]
-name = "interrupt-async"
+name = "schedule-now-from-event-task"
 required-features = ["timer-queue"]
 
 [[example]]
-name = "interrupt-async-after"
+name = "schedule-after-from-event-task"
 required-features = ["timer-queue"]
 
 [[example]]
@@ -54,7 +54,7 @@ name = "user-struct"
 required-features = ["timer-queue"]
 
 [dependencies]
-cortex-m = "0.4.0"
+cortex-m = "0.5.0"
 cortex-m-rtfm-macros = { path = "macros", version = "0.3.1" }
 heapless = "0.3.6"
 typenum = "1.10.0"
@@ -63,13 +63,15 @@ typenum = "1.10.0"
 compiletest_rs = "0.3.5"
 
 [dev-dependencies]
+cortex-m-rt = "0.5.1"
 panic-abort = "0.1.1"
-panic-itm = "0.1.0"
+panic-itm = "0.1.1"
+panic-semihosting = "0.2.0"
 typenum = "1.10.0"
 
 [dev-dependencies.stm32f103xx]
 features = ["rt"]
-version = "0.9.0"
+version = "0.10.0"
 
 [features]
 cm7-r0p1 = ["cortex-m/cm7-r0p1"]
diff --git a/book/book.toml b/book/book.toml
new file mode 100644
index 0000000000..e1c11f60d0
--- /dev/null
+++ b/book/book.toml
@@ -0,0 +1,5 @@
+[book]
+authors = ["Jorge Aparicio"]
+multilingual = false
+src = "src"
+title = "The RTFM book"
diff --git a/book/src/SUMMARY.md b/book/src/SUMMARY.md
new file mode 100644
index 0000000000..33d516c6e6
--- /dev/null
+++ b/book/src/SUMMARY.md
@@ -0,0 +1,24 @@
+# Summary
+
+- [Preface](./preface.md)
+- [User guide](./user/guide.md)
+  - [Basic organization](./user/basic.md)
+  - [Reacting to events](./user/events.md)
+  - [Adding state](./user/state.md)
+    - [Runtime initialized resources](./user/late-resources.md)
+  - [Message passing](./user/messages.md)
+  - [Priority based scheduling](./user/scheduling.md)
+  - [Resource sharing](./user/sharing.md)
+  - [Object pools](./user/pools.md)
+  - [Periodic tasks](./user/periodic.md)
+- [Under the hood](./internals.md)
+  - [The scheduler](./internals/scheduler.md)
+  - [`claim`](./internals/claim.md)
+  - [Message passing](./internals/messages.md)
+    - [Dispatching tasks](./internals/dispatcher.md)
+    - [`schedule_now`](./internals/schedule-now.md)
+    - [Capacity](./internals/capacity.md)
+  - [The timer queue](./internals/tq.md)
+    - [`schedule_after`](./internals/tq/schedule-after.md)
+    - [The timer queue handler](./internals/tq/handler.md)
+  - [Ceiling analysis](./internals/ceilings.md)
diff --git a/book/src/internals.md b/book/src/internals.md
new file mode 100644
index 0000000000..151cc75442
--- /dev/null
+++ b/book/src/internals.md
@@ -0,0 +1,7 @@
+# Under the hood
+
+This section describes the implementation of RTFM. This information is useful to both users of the
+Cortex-M implementation of RTFM and developers interested in porting RTFM to other architectures.
+The former group will get a better understanding of the performance characteristics of RTFM; the
+latter group will get a high level overview of the Cortex-M implementation that they wouldn't
+otherwise get from just reading the code.
diff --git a/book/src/internals/capacity.md b/book/src/internals/capacity.md
new file mode 100644
index 0000000000..aa63d93ad8
--- /dev/null
+++ b/book/src/internals/capacity.md
@@ -0,0 +1,48 @@
+# Capacity
+
+All the queues and arrays internally used by the RTFM runtime are fixed in size and allocated in
+`static` variables. The user directly controls the size of most of these data structures via the
+`capacity` property of a task in the `app!` specification.
+
+For example if the user specifies that task `a` has a `capacity` of 3 then the `app!` macro
+generates the following code:
+
+``` rust
+mod a {
+    const CAPACITY: usize = 3;
+
+    static mut FREE_QUEUE: Queue<u8, U3> = Queue::new();
+
+    static mut PAYLOADS: [i32; CAPACITY] = unsafe { uninitialized() };
+}
+
+// generated by `app!`
+fn main() {
+    unsafe {
+        // ..
+
+        // initialize the `FREE_QUEUE` of each task
+        for index in 0..a::CAPACITY {
+            a::FREE_QUEUE.enqueue_unchecked(index as u8);
+        }
+
+        // ..
+
+        // call user provided `init`
+        init(init::Context());
+
+        // ..
+    }
+}
+```
+
+There is a choice to be made by the implementers of the runtime when it comes to the size (capacity)
+of the ready queues.
+
+Ready queues hold instances of potentially different tasks that are pending execution. However, for
+each task we know the maximum number of instances that can be scheduled and pending execution; this
+information is in the specification (`capacity`). If we choose the capacity of the ready queue to be
+the sum of the maximum number of instances of each different task it can hold then we eliminate the
+possibility of it ever running out of capacity -- in the worst case scenario the ready queue simply
+becomes full. In the Cortex-M implementation of RTFM we chose this sum as the capacity of the ready
+queues; this lets us eliminate capacity checks when adding new tasks to the ready queue.
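+
+As a concrete sketch of this choice: if tasks `a` (capacity 3) and `b` (capacity 2) are both
+dispatched at priority 1, the ready queue for that priority gets a capacity of 3 + 2 = 5 (the exact
+type level names shown here are illustrative):
+
+``` rust
+mod __1 {
+    // Tasks dispatched at priority = 1
+    enum Task { a, b }
+
+    // capacity = 3 (a) + 2 (b) = 5; the queue can never overflow, so no
+    // capacity check is needed when enqueuing
+    static mut READY_QUEUE: Queue<(Task, u8), U5> = Queue::new();
+}
+```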
diff --git a/book/src/internals/ceilings.md b/book/src/internals/ceilings.md
new file mode 100644
index 0000000000..dbc02d29a1
--- /dev/null
+++ b/book/src/internals/ceilings.md
@@ -0,0 +1 @@
+# Ceiling analysis
diff --git a/book/src/internals/claim.md b/book/src/internals/claim.md
new file mode 100644
index 0000000000..7ff50d78bf
--- /dev/null
+++ b/book/src/internals/claim.md
@@ -0,0 +1,154 @@
+# `claim`
+
+At the center of RTFM we have the `Resource` abstraction. A `Resource` is a mechanism to share data
+between two or more tasks (contexts of execution) that can potentially run at different priorities.
+When tasks have different priorities they can preempt each other and this can lead to data races if
+the access to the data is *not* synchronized. A `Resource` eliminates the data race problem by
+forcing the tasks to access the data through a critical section. While in a critical section the
+other tasks that share the `Resource` can *not* start.
+
+As tasks in RTFM are all dispatched in interrupt handlers, one way to create a critical section is to
+disable all interrupts (`cpsid i` instruction). However, this approach also prevents tasks that are
+not contending for the resource from starting, which can reduce the responsiveness of the system.
+The Cortex-M implementation uses priority based critical sections (AKA Priority Ceiling Protocol) to
+avoid this problem, or at least to reduce its effect.
+
+The NVIC, which is the core of the RTFM scheduler, supports dynamic reprioritization of interrupts
+via the [BASEPRI] register. By writing to this register we can increase the priority of the current
+interrupt / task, preventing tasks with lower priority from starting. A temporary increase of the
+priority can be used as a critical section; this is how `claim` works in the Cortex-M implementation
+of RTFM.
+
+[BASEPRI]: https://developer.arm.com/products/architecture/m-profile/docs/100701/latest/special-purpose-mask-registers
+
+The question is: by how much should the priority be increased in these critical sections? The value
+must be high enough to prevent data races but not so high that it blocks unrelated tasks. The answer
+comes from the Priority Ceiling Protocol: each resource has a priority *ceiling*, equal to the
+priority of the highest priority task that can access the resource; to access the data a critical
+section must be created by temporarily raising the priority to match that ceiling. For example, if a
+resource is shared by tasks running at priorities 1 and 3 then its ceiling is 3.
+
+In the Cortex-M implementation of RTFM we store the ceiling of a resource in the type system and we
+also track the dynamic priority of a task using the type system. The main reason for this is
+generating optimal machine code for `claim`s.
+
+Here's what the `Resource` abstraction looks like:
+
+``` rust
+/// Priority token
+pub struct Priority<P> { _not_send_or_sync: *const (), _priority: PhantomData<P> }
+
+pub unsafe trait Resource {
+    /// The number of priority bits supported by the NVIC (device specific)
+    const NVIC_PRIO_BITS: u8;
+
+    /// The priority "ceiling" of this resource
+    type Ceiling: Unsigned; // type level integer (cf. typenum)
+
+    /// The data protected by this resource
+    type Data: 'static + Send;
+
+    // Returns a reference to the `static mut` variable protected by this resource
+    #[doc(hidden)]
+    unsafe fn _var() -> &'static mut Self::Data;
+
+    /// Borrows the resource data while the priority is high enough
+    // NOTE there's a mutable version of this method: `borrow_mut`
+    fn borrow<'p, P>(&self, p: &'p Priority<P>) -> &'p Self::Data
+    where
+        P: IsGreaterOrEqual<Self::Ceiling, Output = True>,
+    {
+        unsafe { Self::_var() }
+    }
+
+    /// Claims the data protected by this resource
+    // NOTE there's a mutable version of this method: `claim_mut`
+    fn claim<R, F, P>(&self, t: &mut Priority<P>, f: F) -> R
+    where
+        F: FnOnce(&Self::Data, &mut Priority<Maximum<P, Self::Ceiling>>) -> R,
+        P: Max<Self::Ceiling> + Unsigned,
+        Self::Ceiling: Unsigned,
+    {
+        unsafe {
+            if P::to_u8() >= Self::Ceiling::to_u8() {
+                // the priority doesn't need to be raised further
+                f(Self::_var(), &mut Priority::new())
+            } else {
+                // the hardware priority ceiling of this resource
+                let new = ((1 << Self::NVIC_PRIO_BITS) - Self::Ceiling::to_u8()) <<
+                    (8 - Self::NVIC_PRIO_BITS);
+
+                let old = basepri::read();
+
+                // start the critical section by raising the dynamic priority
+                basepri::write(new);
+
+                // execute user provided code inside the critical section
+                let r = f(Self::_var(), &mut Priority::new());
+
+                // end the critical section by restoring the old dynamic priority
+                basepri::write(old);
+
+                r
+            }
+        }
+    }
+}
+```
+
+The `Priority` *token* is used to track the current dynamic priority of a task. When a task starts
+its `Context` contains a `Priority` token that represents the priority declared in `app!`. For
+example, if the task priority was set to `2` the `Priority` token will have type `Priority<U2>`
+where `U2` is the type level version of `2` (cf. [`typenum`]).
+
+[`typenum`]: https://docs.rs/typenum
+
+The `claim` method creates a critical section by temporarily raising the task priority. Within this
+critical section (closure) a new `Priority` token is provided while the outer `Priority` token is
+invalidated due to borrow semantics (mutably borrowed / frozen).
+
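+From the user's point of view a `claim` on a resource with a ceiling of 3, performed from a task
+running at priority 1, looks roughly like this (a sketch; `SHARED` is a hypothetical `u32` resource):
+
+``` rust
+// `t` is the `Priority<U1>` token of the current task
+fn task(shared: &SHARED, t: &mut Priority<U1>) {
+    // raises the dynamic priority to 3 (the ceiling of `SHARED`) for the
+    // duration of the closure
+    shared.claim(t, |data, _t2| {
+        // `_t2` has type `Priority<U3>`; tasks with a priority of 3 or lower
+        // can't preempt this closure, so reading `data` here is data race free
+        let _snapshot: u32 = *data;
+
+        // ..
+    });
+
+    // the dynamic priority is back to 1 at this point
+}
+```
+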
+When generating code the `app!` macro creates a `struct` that implements the `Resource` trait for
+each resource declared in `resources`. The data behind each `Resource` is a `static mut` variable:
+
+``` rust
+// given: `resources: { static FOO: u32 = 0 }`
+
+// app! produces
+mod __resource {
+    pub struct FOO { _not_send_or_sync: *const () }
+
+    unsafe impl Resource for FOO {
+        const NVIC_PRIO_BITS: u8 = stm32f103xx::NVIC_PRIO_BITS;
+
+        type Ceiling = U3;
+
+        type Data = u32;
+
+        unsafe fn _var() -> &'static mut u32 {
+            static mut FOO: u32 = 0;
+
+            &mut FOO
+        }
+    }
+}
+```
+
+These resource `struct`s are packed into a `Resources` `struct` which is then placed in the
+`Context` of each task.
+
+``` rust
+// given: `tasks: { a: { resources: [FOO, BAR] } }`
+
+// app! produces
+mod a {
+    pub struct Context {
+        pub resources: Resources,
+        // ..
+    }
+
+    pub struct Resources {
+        pub FOO: __resource::FOO,
+        pub BAR: __resource::BAR,
+    }
+}
+```
diff --git a/book/src/internals/dispatcher.md b/book/src/internals/dispatcher.md
new file mode 100644
index 0000000000..df0148d5e3
--- /dev/null
+++ b/book/src/internals/dispatcher.md
@@ -0,0 +1,124 @@
+# Dispatching tasks
+
+Let's first analyze the simpler case of dispatching tasks whose `input` type is `()`, i.e. the
+message carries no payload, and that were scheduled using `schedule_now`.
+
+All tasks scheduled by other tasks, i.e. tasks not bound to an interrupt, that are to be executed at
+the same priority are dispatched from the same *task dispatcher*. Task dispatchers are implemented
+on top of the free interrupt handlers which are declared in `free_interrupts`. Each task dispatcher
+has a queue of tasks ready to execute -- these queues are called *ready queues*. RTFM uses
+`heapless::RingBuffer` for all the internal queues; these queues are lock-free and wait-free when
+the queue has a single consumer and a single producer.
+
+Let's illustrate the workings of task dispatchers with an example. Assume we have an application
+with 4 tasks not bound to interrupts: two of them, `a` and `b`, are dispatched at priority 1; and
+the other two, `c` and `d`, are dispatched at priority 2. This is what the task dispatchers produced
+by the `app!` macro look like:
+
+``` rust
+// priority = 1
+unsafe extern "C" fn EXTI0() {
+    while let Some(task) = __1::READY_QUEUE.dequeue() {
+        match task {
+            __1::Task::a => a(a::Context::new()),
+            __1::Task::b => b(b::Context::new()),
+        }
+    }
+}
+
+// priority = 2
+unsafe extern "C" fn EXTI1() {
+    while let Some(task) = __2::READY_QUEUE.dequeue() {
+        match task {
+            __2::Task::c => c(c::Context::new()),
+            __2::Task::d => d(d::Context::new()),
+        }
+    }
+}
+
+mod __1 {
+    // Tasks dispatched at priority = 1
+    enum Task { a, b }
+
+    static mut READY_QUEUE: Queue<Task, UN> = Queue::new();
+}
+
+mod __2 {
+    // Tasks dispatched at priority = 2
+    enum Task { c, d }
+
+    static mut READY_QUEUE: Queue<Task, UN> = Queue::new();
+}
+```
+
+Note that we have two queues here: one for priority = 1 and another for priority = 2. The
+interrupts used to dispatch tasks are chosen from the list of `free_interrupts` declared in the
+`app!` macro.
+
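+For reference, a sketch of the relevant part of the `app!` specification for this example (the task
+declarations are abbreviated; a real specification would also list who schedules whom, as shown in
+the user guide):
+
+``` rust
+app! {
+    device: stm32f103xx,
+
+    // interrupt handlers not bound to any event task; the runtime turns these
+    // into task dispatchers (one per dispatch priority)
+    free_interrupts: [EXTI0, EXTI1],
+
+    tasks: {
+        a: { /* priority: 1 */ },
+        b: { /* priority: 1 */ },
+        c: { priority: 2 },
+        d: { priority: 2 },
+    },
+}
+```
+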
+#### Payloads
+
+Now let's add payloads to the messages. The message queues will now not only store the task name
+(`enum Task`) but also an *index* (`u8`) to the payload.
+
+Let's first look at how the first task dispatcher changed: let's say that tasks `a` and `b` now
+expect payloads of `i32` and `i16`, respectively.
+
+``` rust
+mod a {
+    static mut PAYLOADS: [i32; N] = unsafe { uninitialized() };
+
+    static mut FREE_QUEUE: Queue<u8, UN> = Queue::new();
+
+    // ..
+}
+
+mod b {
+    static mut PAYLOADS: [i16; N] = unsafe { uninitialized() };
+
+    static mut FREE_QUEUE: Queue<u8, UN> = Queue::new();
+
+    // ..
+}
+
+mod __1 {
+    // Tasks dispatched at priority = 1
+    enum Task { a, b }
+
+    static mut READY_QUEUE: Queue<(Task, u8), UN> = Queue::new();
+}
+
+mod __2 {
+    // Tasks dispatched at priority = 2
+    enum Task { c, d }
+
+    static mut READY_QUEUE: Queue<(Task, u8), UN> = Queue::new();
+}
+
+// priority = 1
+unsafe extern "C" fn EXTI0() {
+    while let Some((task, index)) = __1::READY_QUEUE.dequeue() {
+        match task {
+            __1::Task::a => {
+                let payload: i32 = ptr::read(&a::PAYLOADS[index as usize]);
+                a::FREE_QUEUE.enqueue_unchecked(index);
+
+                a(a::Context::new(payload))
+            },
+            __1::Task::b => {
+                let payload: i16 = ptr::read(&b::PAYLOADS[index as usize]);
+                b::FREE_QUEUE.enqueue_unchecked(index);
+
+                b(b::Context::new(payload))
+            },
+        }
+    }
+}
+```
+
+Each task dispatcher continuously dequeues tasks from the ready queue until it's empty. After
+dequeuing a task - index pair the task dispatcher looks at which task it has to execute (`match`)
+and uses this information to fetch (`ptr::read`) the payload from the corresponding list of
+payloads (`PAYLOADS`) -- there's one such list per task. After retrieving the payload this leaves an
+empty slot in the list of payloads; the index to this empty slot is appended to a list of free slots
+(`FREE_QUEUE`). Finally, the task dispatcher proceeds to execute the task using the message payload
+as the input.
diff --git a/book/src/internals/locks.md b/book/src/internals/locks.md
new file mode 100644
index 0000000000..31ab4b18ad
--- /dev/null
+++ b/book/src/internals/locks.md
@@ -0,0 +1 @@
+# Locks
diff --git a/book/src/internals/messages.md b/book/src/internals/messages.md
new file mode 100644
index 0000000000..9486da193a
--- /dev/null
+++ b/book/src/internals/messages.md
@@ -0,0 +1,3 @@
+# Message passing
+
+This section describes how message passing is implemented in RTFM.
diff --git a/book/src/internals/schedule-now.md b/book/src/internals/schedule-now.md
new file mode 100644
index 0000000000..1071dbf89b
--- /dev/null
+++ b/book/src/internals/schedule-now.md
@@ -0,0 +1,47 @@
+# `schedule_now`
+
+We saw how tasks dispatching works; now let's see how `schedule_now` is implemented. Assume that
+task `a` can be `schedule_now`-ed by task `b`; in this scenario the `app!` macro generates code like
+this:
+
+``` rust
+mod __schedule_now {
+    pub struct a { _not_send_or_sync: PhantomData<*const ()> }
+
+    impl a {
+        fn schedule_now(&mut self, t: &mut Threshold, payload: i32) -> Result<(), i32> {
+            if let Some(index) = a::FREE_QUEUE.claim_mut(t, |fq, _| fq.dequeue()) {
+                ptr::write(&mut a::PAYLOADS[index as usize], payload);
+
+                __1::READY_QUEUE.claim_mut(t, |rq, _| {
+                    rq.enqueue_unchecked((__1::Task::a, index))
+                });
+
+                NVIC.set_pending(Interrupt::EXTI0);
+
+                Ok(())
+            } else {
+                Err(payload)
+            }
+        }
+    }
+}
+
+mod b {
+    pub struct Tasks { a: __schedule_now::a }
+
+    pub struct Context {
+        tasks: Tasks,
+        // ..
+    }
+}
+```
+
+The first step in scheduling a new task is to get a free slot, where the payload will be stored,
+from the `FREE_QUEUE`. If the list of payloads (`PAYLOADS`) is full, i.e. if `FREE_QUEUE` is empty,
+then `schedule_now` returns early with an error. After retrieving a free slot the `payload` is
+stored into it. Then the task - index pair is enqueued into the corresponding ready queue. Finally,
+the interrupt whose handler is being used as task dispatcher is set as *pending* -- this will cause
+the `NVIC` (the hardware scheduler) to execute the handler.
+
+Fetching a free slot from the free queue and enqueuing a task - index pair into the ready queue may
+require critical sections so the queues are accessed as resources using `claim_mut`. In a later
+section we'll analyze where critical sections are required.
diff --git a/book/src/internals/scheduler.md b/book/src/internals/scheduler.md
new file mode 100644
index 0000000000..de022e4d64
--- /dev/null
+++ b/book/src/internals/scheduler.md
@@ -0,0 +1,40 @@
+# The scheduler
+
+The RTFM framework includes a priority based scheduler. In the Cortex-M implementation of RTFM the
+[NVIC][] (Nested Vectored Interrupt Controller), a Cortex-M core peripheral, does the actual task
+scheduling -- this greatly reduces the bookkeeping that needs to be done in software.
+
+[NVIC]: https://developer.arm.com/docs/ddi0337/e/nested-vectored-interrupt-controller
+
+All tasks map one way or another to an interrupt. This lets the NVIC schedule tasks as if they were
+interrupts. The NVIC dispatches interrupt handlers according to their priorities; this gives us
+priority based scheduling of tasks for free.
+
+The NVIC contains interrupt priority registers (IPR) where the *static* priority of an interrupt
+can be set. The priorities assigned to tasks by the user are programmed into these registers after
+`init` ends and before `idle` starts, while the interrupts are disabled.
+
+The IPR registers store priorities in a different way than the user specifies them so a conversion
+is needed. To distinguish these two we refer to the IPR format as *hardware* priority level, and we
+refer to the priority entered in `app!` as the *logical* priority level.
+
+In hardware priority levels a bigger number indicates *lower* urgency and vice versa. Also, Cortex-M
+devices only support a certain number of priority bits: for example 4 bits equates to 16 different
+priority levels. These priority bits correspond to the most significant bits of each 8-bit IPR
+register.
+
+Different devices support different numbers of priority bits so this needs to be accounted for when
+converting from a logical priority level to a hardware priority level. This is what the conversion
+routine looks like:
+
+``` rust
+// number of priority bits (device specific)
+const NVIC_PRIO_BITS: u8 = 4;
+
+fn logical2hardware(prio: u8) -> u8 {
+    ((1 << NVIC_PRIO_BITS) - prio) << (8 - NVIC_PRIO_BITS)
+}
+```
+
+The RTFM runtime needs to know `NVIC_PRIO_BITS` for the target device to properly configure the
+priority of each task. Currently the `app!` macro expects the `device` crate to contain this
+information as a `u8` constant at `$device::NVIC_PRIO_BITS`.
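+
+For example, on a device with 4 priority bits the conversion routine above maps logical priorities
+to hardware priorities as shown in this small, standalone sketch (the concrete numbers follow
+directly from the formula):
+
+``` rust
+// number of priority bits (device specific)
+const NVIC_PRIO_BITS: u8 = 4;
+
+fn logical2hardware(prio: u8) -> u8 {
+    ((1 << NVIC_PRIO_BITS) - prio) << (8 - NVIC_PRIO_BITS)
+}
+
+fn main() {
+    // logical 1 (lowest task priority) -> hardware 0xF0 (least urgent)
+    assert_eq!(logical2hardware(1), 0xF0);
+
+    // logical 2 -> hardware 0xE0 (more urgent than 0xF0)
+    assert_eq!(logical2hardware(2), 0xE0);
+
+    // logical 16 (highest priority with 4 bits) -> hardware 0x00 (most urgent)
+    assert_eq!(logical2hardware(16), 0x00);
+}
+```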
diff --git a/book/src/internals/tq.md b/book/src/internals/tq.md
new file mode 100644
index 0000000000..897b65ef6f
--- /dev/null
+++ b/book/src/internals/tq.md
@@ -0,0 +1,8 @@
+# The timer queue
+
+In this section we explore the *timer queue*, the backbone of the `schedule_after` API.
+
+The `schedule_after` method schedules a task to run in the future. `schedule_after` doesn't directly
+enqueue tasks into the ready queues; instead it enqueues them in the *timer queue*. The timer queue
+is a priority queue that prioritizes tasks with the nearest scheduled start. Associated with the
+timer queue there is an interrupt handler that moves tasks that have become ready into the ready
+queues.
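+
+To give a concrete picture, here's a minimal sketch of what the timer queue could look like (a
+sketch, not the exact implementation; the field and type names are assumptions based on the
+following sections):
+
+``` rust
+// one entry per task waiting for its scheduled start
+struct NotReady {
+    // which task to dispatch
+    task: Task,
+    // index into the task's `PAYLOADS` / `SCHEDULED_STARTS` lists
+    index: u8,
+    // absolute time at which the task should start
+    scheduled_start: Instant,
+}
+
+struct TimerQueue {
+    // a *min*-heap ordered by `scheduled_start`: the task with the nearest
+    // scheduled start sits at the front of the queue
+    priority_queue: BinaryHeap<NotReady, UN, Min>,
+}
+```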
diff --git a/book/src/internals/tq/handler.md b/book/src/internals/tq/handler.md
new file mode 100644
index 0000000000..e099cd570c
--- /dev/null
+++ b/book/src/internals/tq/handler.md
@@ -0,0 +1,87 @@
+# The timer queue handler
+
+The `SysTick` exception handler is used as the timer queue handler. This handler takes care of
+moving tasks that have become ready from the timer queue to their respective ready queues. The
+timer queue makes use of the Cortex-M system timer, the `SysTick`, to schedule when the `SysTick`
+handler should run.
+
+This is what the `SYS_TICK` handler looks like for our running example where `a` and `d` are
+scheduled via `schedule_after`:
+
+``` rust
+unsafe extern "C" fn SYS_TICK() {
+    let mut t = Threshold::new(..);
+
+    loop {
+        let next = TQ.claim_mut(&mut t, |tq, _| {
+            let front = tq.priority_queue.peek().map(|nr| nr.scheduled_start);
+
+            if let Some(scheduled_start) = front {
+                let diff = scheduled_start - Instant::now();
+
+                if diff > 0 {
+                    // task still not ready, schedule this handler to run in the future by
+                    // setting a new timeout
+
+                    // maximum timeout supported by the SysTick
+                    const MAX: u32 = 0x00ffffff;
+
+                    SYST.set_reload(cmp::min(MAX, diff as u32));
+
+                    // start counting from the new reload
+                    SYST.clear_current();
+
+                    None
+                } else {
+                    // task became ready
+                    let nr = tq.priority_queue.pop_unchecked();
+
+                    Some((nr.task, nr.index))
+                }
+            } else {
+                // the queue is empty
+                SYST.disable_interrupt();
+
+                None
+            }
+        });
+
+        if let Some((task, index)) = next {
+            // place the task - index pair into the corresponding ready queue
+            match task {
+                __tq::Task::a => {
+                    __1::READY_QUEUE.claim_mut(&mut t, |rq, _| {
+                        rq.enqueue_unchecked((__1::Task::a, index));
+                    });
+
+                    NVIC.set_pending(Interrupt::EXTI0);
+                },
+                __tq::Task::d => {
+                    __2::READY_QUEUE.claim_mut(&mut t, |rq, _| {
+                        rq.enqueue_unchecked((__2::Task::d, index));
+                    });
+
+                    NVIC.set_pending(Interrupt::EXTI1);
+                },
+            }
+        } else {
+            return;
+        }
+    }
+}
+```
+
+The `SYS_TICK` handler will use a `loop` to move all the tasks that have become ready from the
+priority queue, the timer queue, to the ready queues.
+
+To do that the handler will check the front of the priority queue, which contains the task with the
+closest `scheduled_start`. If the queue is empty then the handler will disable the `SysTick`
+exception and return; the handler won't run again until the exception is re-enabled by
+`TimerQueue.enqueue`.
+
+If the priority queue was not empty the handler will then compare that closest
+`scheduled_start` against the current time (`Instant::now()`). If the `scheduled_start` time has not
+been reached the handler will schedule to run itself in the future by setting a `SysTick` timeout.
+If instead we are past the closest `scheduled_start` then the handler will move the task at the
+front of the queue to its corresponding `READY_QUEUE` and set the corresponding task dispatcher as
+pending.
diff --git a/book/src/internals/tq/schedule-after.md b/book/src/internals/tq/schedule-after.md
new file mode 100644
index 0000000000..f9c0b3420b
--- /dev/null
+++ b/book/src/internals/tq/schedule-after.md
@@ -0,0 +1,115 @@
+# `schedule_after`
+
+Let's see how `schedule_after` adds tasks to the timer queue.
+
+``` rust
+mod __schedule_after {
+    impl a {
+        fn schedule_after(
+            &mut self,
+            t: &mut Threshold,
+            offset: u32,
+            payload: i32,
+        ) -> Result<(), i32> {
+            if let Some(index) = a::FREE_QUEUE.dequeue() {
+                core::ptr::write(
+                    &mut a::PAYLOADS[index as usize],
+                    payload,
+                );
+
+                let scheduled_start = self.scheduled_start + offset;
+
+                core::ptr::write(
+                    &mut a::SCHEDULED_STARTS[index as usize],
+                    scheduled_start,
+                );
+
+                let not_ready = NotReady {
+                    index,
+                    scheduled_start,
+                    task: __tq::Task::a,
+                };
+
+                __tq::TIMER_QUEUE.claim_mut(t, |tq, _| tq.enqueue(not_ready));
+
+                Ok(())
+            } else {
+                Err(payload)
+            }
+        }
+    }
+}
+```
+
+Like `schedule_now`, `schedule_after` starts by fetching a free slot from the `FREE_QUEUE`. If
+there's no free slot available the function early returns with an error. Once a free slot (`index`)
+has been retrieved the payload is stored in that spot of the payload list (`PAYLOADS`). The
+`scheduled_start` of the newly scheduled task is the `scheduled_start` time of the current task plus
+the specified `offset`. This `scheduled_start` value is also stored in a list (`SCHEDULED_STARTS`)
+at the free slot `index`.  After that's done, the not ready task -- represented by the `NotReady`
+struct which contains the `Task` name, the payload / `scheduled_start` index and the actual
+`scheduled_start` value -- is inserted into the timer queue.
+
+`TimerQueue.enqueue` does a bit more work than just adding the not ready task to the priority
+queue of tasks:
+
+``` rust
+struct TimerQueue {
+    priority_queue: BinaryHeap<..>,
+}
+
+impl TimerQueue {
+    unsafe fn enqueue(&mut self, new: NotReady) {
+        let mut is_empty = true;
+
+        if self.priority_queue
+            .peek()
+            .map(|head| {
+                is_empty = false;
+                new.scheduled_start < head.scheduled_start
+            })
+            .unwrap_or(true)
+        {
+            if is_empty {
+                SYST.enable_interrupt();
+            }
+
+            SCB.set_pending(Exception::SysTick);
+        }
+
+        self.priority_queue.push_unchecked(new);
+    }
+}
+```
+
+If the priority queue is empty or the new not ready task is scheduled to run *before* the current
+task at the front of the queue then the `SysTick` exception handler is also enabled and set as
+pending. In the next section we'll see the role that this handler plays.
+
+Another important thing to note is the `Task` enum used in the `NotReady` struct: it only contains
+tasks that can be scheduled via `schedule_after`. The tasks in this set are not necessarily all
+dispatched at the same priority.
+
+Consider the following task configuration:
+
+- Tasks `a` and `b` are dispatched at priority 1
+- Tasks `c` and `d` are dispatched at priority 2
+- `a` is scheduled using `schedule_after`
+- `b` is scheduled using `schedule_now`
+- `c` is scheduled using `schedule_now`
+- `d` is scheduled both via `schedule_now` and `schedule_after`
+
+RTFM will end up creating the following `enum`s:
+
+``` rust
+mod __1 {
+    enum Task { a, b }
+}
+
+mod __2 {
+    enum Task { c, d }
+}
+
+mod __tq {
+    enum Task { a, d }
+}
+```
diff --git a/book/src/preface.md b/book/src/preface.md
new file mode 100644
index 0000000000..3b301b4c7b
--- /dev/null
+++ b/book/src/preface.md
@@ -0,0 +1,4 @@
+# Preface
+
+This book documents the Real Time For the Masses framework from the point of view of a user and from
+the point of view of an implementer.
diff --git a/book/src/user/basic.md b/book/src/user/basic.md
new file mode 100644
index 0000000000..e8aed714a1
--- /dev/null
+++ b/book/src/user/basic.md
@@ -0,0 +1,80 @@
+# Basic organization
+
+This section presents the structure of a basic RTFM program.
+
+A minimal RTFM program is shown below.
+
+``` rust
+#![feature(proc_macro)] // required to import the `app!` macro
+#![no_std]
+
+extern crate cortex_m_rtfm;
+extern crate panic_abort; // panicking behavior
+extern crate stm32f103xx; // device crate
+
+use cortex_m_rtfm::app;
+
+app! {
+    device: stm32f103xx,
+}
+
+fn init(ctxt: init::Context) -> init::LateResources {
+    // Cortex-M peripherals
+    let core = ctxt.core;
+
+    // device peripherals
+    let device = ctxt.device;
+
+    init::LateResources {} // more on this later
+}
+
+fn idle(ctxt: idle::Context) -> ! {
+    loop {
+        // do stuff here
+    }
+}
+```
+
+All RTFM applications include an invocation of the `app!` macro; this macro is the *specification*
+of the application. At the very least you'll have to declare the device you are using in this macro.
+I'll be using the STM32F103RB microcontroller throughout this book so I'll use `stm32f103xx` for the
+value of the `device` field. The value of this field is the *path* to a device crate, a crate
+generated using `svd2rust`.
+
+The `app!` macro generates the entry point of the program: the `main` function. So, instead of a
+`main` function you have to provide *two* functions: `idle` and `init`. The signatures of those two
+functions are shown in the minimal example. The `main` function generated by the `app!` macro will
+call the `init` function *with interrupts disabled* first and then it will call the never ending
+`idle` function.
+
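+To make this concrete, here's a rough sketch of what the generated `main` could look like (a sketch
+only; the actual generated code does more bookkeeping, as described in the "Under the hood"
+chapters):
+
+``` rust
+// generated by the `app!` macro
+fn main() {
+    // interrupts are disabled at this point
+    let late_resources = init(init::Context());
+
+    // omitted: store `late_resources`, set task priorities in the NVIC and
+    // unmask the interrupts used by tasks
+
+    // interrupts are enabled from this point on; tasks can now preempt `idle`
+    idle(idle::Context())
+}
+```
+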
+The arguments of `init` and `idle` are these `Context` values. These `Context` structs have the
+following fields:
+
+``` rust
+// generated by the `app!` macro
+mod init {
+    struct Context {
+        // pretty similar to `cortex_m::Peripherals`, minus some fields
+        pub core: CorePeripherals,
+        pub device: stm32f103xx::Peripherals,
+        pub resources: Resources,
+        pub tasks: Tasks,
+        pub threshold: Threshold,
+    }
+
+    // ..
+}
+
+// generated by the `app!` macro
+mod idle {
+    pub struct Context {
+        pub resources: Resources,
+        pub threshold: Threshold,
+    }
+
+    // ..
+}
+```
+
+That covers the structure of a minimal RTFM application. RTFM applications are usually structured as
+a set of *tasks*. In the next section we'll see how to create tasks.
diff --git a/book/src/user/events.md b/book/src/user/events.md
new file mode 100644
index 0000000000..95e0d0940e
--- /dev/null
+++ b/book/src/user/events.md
@@ -0,0 +1,65 @@
+# Reacting to events
+
+RTFM's main use case is building reactive systems: systems that respond to external stimuli. In RTFM,
+tasks are the main mechanism to respond to *events*, or interrupt sources.
+
+Below is an RTFM program with a single *event* task:
+
+``` rust
+#![feature(proc_macro)]
+#![no_std]
+
+extern crate cortex_m_rtfm;
+
+use cortex_m_rtfm::app;
+
+app! {
+    device: stm32f103xx,
+
+    tasks: {
+        exti0: {
+            interrupt: EXTI0, // this interrupt corresponds to the user pressing a button
+        },
+    },
+}
+
+// omitted: init and idle
+
+// the body of the `exti0` task
+fn exti0(ctxt: exti0::Context) {
+    // executed whenever a certain button is pressed
+
+    println!("User pressed a button");
+}
+```
+
+Here we have a task named `exti0` bound to the `EXTI0` interrupt source. The `exti0` task starts,
+i.e. the `exti0` function is called, whenever the `EXTI0` interrupt fires. Interrupts are device
+specific and come from the device crate, `stm32f103xx`. In this case the interrupt `EXTI0` is
+triggered by a change in the logic level of a digital input pin. In this example said pin is
+connected to a button; thus pressing the button triggers the `exti0` task.
+
+Each task has access to a `Context`. The fields of this `Context` struct are:
+
+``` rust
+// generated by the `app!` macro
+mod exti0 {
+    pub struct Context {
+        pub input: (),
+        pub resources: Resources,
+        pub tasks: Tasks,
+        pub threshold: Threshold,
+    }
+
+    // ..
+}
+```
+
+Event tasks map directly to device specific interrupts. The RTFM runtime will take care of both
+*unmasking* those interrupts and setting their priorities in the NVIC *after* `init` ends and
+*before* `idle` starts. Note that in most cases it is also necessary to enable the interrupt in the
+device specific peripheral to get the interrupt source to fire the event task.
+
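+For example, a minimal sketch of what that peripheral side configuration could look like in `init`
+(this is an assumption about the `stm32f103xx` register API, not code taken from this crate; check
+the device reference manual for the actual registers involved):
+
+``` rust
+fn init(ctxt: init::Context) -> init::LateResources {
+    let device = ctxt.device;
+
+    // omitted: configure the input pin connected to the button
+
+    // unmask the EXTI0 line in the EXTI peripheral so that edges on the pin
+    // actually generate the `EXTI0` interrupt
+    device.EXTI.imr.modify(|_, w| w.mr0().set_bit());
+
+    // trigger on the rising edge
+    device.EXTI.rtsr.modify(|_, w| w.tr0().set_bit());
+
+    init::LateResources {}
+}
+```
+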
+The other consequence of tasks being interrupts is that tasks won't start until after `init`
+ends -- because interrupts are disabled during `init`. That is, if an event occurs during `init` the
+corresponding task will be set as *pending* but it won't start until *after* `init` ends.
diff --git a/book/src/user/guide.md b/book/src/user/guide.md
new file mode 100644
index 0000000000..437ca2fc18
--- /dev/null
+++ b/book/src/user/guide.md
@@ -0,0 +1,4 @@
+# User guide
+
+This section introduces the Real Time For the Masses framework to a new user through various
+examples presented in order of increasing complexity.
diff --git a/book/src/user/late-resources.md b/book/src/user/late-resources.md
new file mode 100644
index 0000000000..65374da1ea
--- /dev/null
+++ b/book/src/user/late-resources.md
@@ -0,0 +1,49 @@
+# Runtime initialized resources
+
+Normal `static` variables in Rust must be assigned an initial value when declared, i.e. at compile
+time. Resources don't have this limitation and can be initialized at *runtime*; these resources are
+called "late resources" because they are initialized *late*. The initial values of late resources
+must be returned by the `init` function.
+
+Consider the following example where we load a cryptographic key from EEPROM and then use the key in
+a task.
+
+``` rust
+#![feature(proc_macro)]
+#![no_std]
+
+extern crate cortex_m_rtfm;
+
+use cortex_m_rtfm::app;
+
+app! {
+    device: stm32f103xx,
+
+    resources: {
+        static KEY: [u8; 256];
+    },
+
+    tasks: {
+        usart1: {
+            interrupt: USART1, // data arrived via the serial interface
+            resources: [KEY],
+        },
+    },
+}
+
+fn init(ctxt: init::Context) -> init::LateResources {
+    let key = load_from_eeprom();
+
+    init::LateResources {
+        KEY: key,
+    }
+}
+
+// omitted: `idle`
+
+fn usart1(ctxt: usart1::Context) {
+    let key: &[u8; 256] = ctxt.resources.KEY;
+
+    // use key to decrypt incoming data
+}
+```
diff --git a/book/src/user/messages.md b/book/src/user/messages.md
new file mode 100644
index 0000000000..57d638e28e
--- /dev/null
+++ b/book/src/user/messages.md
@@ -0,0 +1,148 @@
+# Message passing
+
+So far we have seen tasks as a way to respond to events but events are not the only way to start a
+task. A task can schedule another task, optionally passing a message to it.
+
+For example, consider the following application where data is received from the serial interface and
+collected into a buffer. `\n` is used as a frame delimiter; once a frame has been received we want
+to process the buffer contents but we don't want to do that in the `usart1` task because that task
+has to keep up with the fast incoming data and it should be short and high priority. So, instead we
+*send* the frame to a *lower priority* task for further processing; this way we keep the `usart1`
+task responsive.
+
+``` rust
+#![feature(proc_macro)]
+#![no_std]
+
+extern crate cortex_m_rtfm;
+extern crate heapless;
+
+use cortex_m_rtfm::app;
+use heapless::Vec;
+use heapless::consts::*;
+
+app! {
+    device: stm32f103xx,
+
+    resources: {
+        // 128-byte buffer
+        static BUFFER: Vec<u8, U128> = Vec::new();
+
+        // omitted: other resources
+    },
+
+    tasks: {
+        // task bound to an interrupt
+        usart1: {
+            // event = data arrived via the serial interface
+            interrupt: USART1,
+
+            // higher priority number = more urgent
+            priority: 2,
+
+            // omitted: the exact list of resources assigned to this task
+
+            // tasks that this task can schedule
+            schedule_now: [process],
+        },
+
+        // task schedulable by other tasks
+        process: {
+            // the input this task expects
+            input: Vec<u8, U128>,
+
+            // if omitted `priority` is assumed to be `1`
+            // priority: 1,
+        },
+    },
+}
+
+// omitted: `init` and `idle`
+
+fn usart1(ctxt: usart1::Context) {
+    const FRAME_DELIMITER: u8 = b'\n';
+
+    let t = &mut ctxt.threshold;
+    let tasks = ctxt.tasks;
+
+    let buffer: &mut _ = ctxt.resources.BUFFER;
+    let serial: &mut _ = ctxt.resources.SERIAL;
+
+    let byte = serial.read(); // reads a single byte from the serial interface
+
+    if byte == FRAME_DELIMITER {
+        tasks.process.schedule_now(t, buffer.clone()).unwrap();
+    } else {
+        if buffer.push(byte).is_err() {
+            // omitted: error handling
+        }
+    }
+}
+
+fn process(ctxt: process::Context) {
+    let buffer = ctxt.input;
+
+    match &buffer[..] {
+         "command1" => /* .. */,
+         "command2" => /* .. */,
+         // ..
+         _ => /* .. */,
+    }
+}
+```
+
+Here we have the `usart1` task scheduling the `process` task. The `process` task expects some input;
+the second argument of `schedule_now` is the expected input. This argument will be sent as a message
+to the `process` task.
+
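+As with event tasks, the `app!` macro generates a `Context` for the `process` task; a sketch of
+what it could contain (the exact set of fields is an assumption based on the `exti0` example from
+the previous sections) is shown below:
+
+``` rust
+// generated by the `app!` macro
+mod process {
+    pub struct Context {
+        // the message sent via `schedule_now` ends up here
+        pub input: Vec<u8, U128>,
+        pub resources: Resources,
+        pub tasks: Tasks,
+        pub threshold: Threshold,
+    }
+
+    // ..
+}
+```
+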
+Only types that implement the `Send` trait and have a `'static` lifetime can be sent as messages.
+This means that messages can't contain references to things like values allocated on the stack of
+the task or references to the state of a task.
+
+This constraint forces us to send a copy of the buffer, which is 128 bytes in size, rather than a
+reference, which is 4 bytes in size -- this is rather expensive in terms of memory and execution
+time. In a future section we'll see how to make messages much smaller using object pools.
+
+## How is this different from a function call?
+
+You may be wondering how message passing is different from a simple function call, as shown
+below:
+
+``` rust
+fn usart1(ctxt: usart1::Context) {
+    const FRAME_DELIMITER: u8 = b'\n';
+
+    let buffer: &mut _ = ctxt.resources.BUFFER;
+    let serial: &mut _ = ctxt.resources.SERIAL;
+
+    let byte = serial.read(); // reads a single byte from the serial interface
+
+    if byte == FRAME_DELIMITER {
+        process(buffer);
+    } else {
+        if buffer.push(byte).is_err() {
+            // omitted: error handling
+        }
+    }
+}
+
+fn process(buffer: &Vec<u8, U128>) {
+    match &buffer[..] {
+         "command1" => /* .. */,
+         "command2" => /* .. */,
+         // ..
+         _ => /* .. */,
+    }
+}
+```
+
+The function call approach even avoids the expensive copy of the buffer!
+
+The main difference is that a function call will execute `process` in the *same* execution context
+as the `usart1` task, extending the execution time of the `usart1` task, whereas making `process`
+into its own task means that it can be scheduled differently.
+
+In this particular case the `process` task has lower priority than the `usart1` task so it won't be
+executed until *after* the `usart1` task ends. Also, preemption is possible: if a `USART1` event
+occurs while executing the `process` task the scheduler will prioritize the execution of the
+`usart1` task. The next section has more details about priority based scheduling.
diff --git a/book/src/user/periodic.md b/book/src/user/periodic.md
new file mode 100644
index 0000000000..0819fb4979
--- /dev/null
+++ b/book/src/user/periodic.md
@@ -0,0 +1,91 @@
+# Periodic tasks
+
+We have seen the `schedule_now` method, which is used to schedule tasks to run immediately. RTFM
+also allows scheduling tasks to run some time in the future via the `schedule_in` API. In a nutshell,
+`schedule_in` lets you schedule a task to run a certain number of clock (HCLK) cycles in the
+future. The offset that `schedule_in` takes as argument is added to the *scheduled start time*
+of the *current* task to compute the scheduled start time of the newly scheduled task. This lets you
+create periodic tasks without accumulating drift.
+
+**NOTE** Using the `schedule_in` API requires enabling the "timer-queue" feature.
+
+Let's look at an example:
+
+``` rust
+#![feature(proc_macro)]
+#![no_std]
+
+extern crate cortex_m_rtfm;
+
+use cortex_m_rtfm::app;
+
+app! {
+    device: stm32f103xx,
+
+    init: {
+        schedule_now: [a],
+    },
+
+    tasks: {
+        a: {
+            schedule_in: [a],
+        },
+    },
+}
+
+fn init(ctxt: init::Context) -> init::LateResources {
+    let t = &mut ctxt.threshold;
+
+    ctxt.tasks.a.schedule_now(t, ());
+
+    init::LateResources {}
+}
+
+// number of clock cycles equivalent to 1 second
+const S: u32 = 8_000_000;
+
+fn a(ctxt: a::Context) {
+    // `u32` timestamp that corresponds to now
+    let now = rtfm::now();
+
+    let t = &mut ctxt.threshold;
+
+    println!("a(ss={}, now={})", ctxt.scheduled_start, now);
+
+    ctxt.tasks.a.schedule_in(t, 1 * S, ());
+}
+```
+
+This program runs a single task that's executed every second and it prints the following:
+
+``` text
+a(ss=0, now=71)
+a(ss=8000000, now=8000171)
+a(ss=16000000, now=16000171)
+```
+
+`init` is not a task but all tasks scheduled from it assume that `init` has a scheduled start of `t
+= 0` which represents the time at which `init` ends and all tasks can start. `schedule_now` makes
+the scheduled task inherit the scheduled start of the current task; in this case the first instance
+of `a` inherits the scheduled start of `init`, that is `t = 0`.
+
+Task `a` schedules itself to run `S` cycles (1 second) in the future. The scheduled start of
+its next instance will be its current scheduled start plus `S` cycles. Thus, the second instance of
+`a` is scheduled to start at `t = 1 * S`, the third instance is scheduled to start at `t = 2 * S`
+and so on. Note that it doesn't matter when or where in the body of `a` `schedule_in` is invoked;
+the outcome will be the same.
+
+Now the `scheduled_start` of a task is not the *exact* time at which the task will run -- this can
+be seen in the output of the above program: `now` doesn't match the scheduled start. There's some
+overhead in the task dispatcher so a task will usually run dozens of cycles after its scheduled
+start time. Also, priority based scheduling can make lower priority tasks run much later than their
+scheduled start time; for example, imagine the scenario where two tasks have the same scheduled
+start but different priorities.
+
+## `schedule_in` and events
+
+Tasks that spawn from `init` have predictable scheduled starts because `init` itself has a scheduled
+start of `t = 0`, but what happens with tasks triggered by events which can start at any time? These
+tasks use `rtfm::now()` as an *estimate* of their scheduled start. In the best-case scenario
+`rtfm::now()` will be very close to the time at which the event happened. But, if the task has low
+priority it may not run until other high priority tasks are done; in this scenario `rtfm::now()`,
+and thus the estimated scheduled start, could be very far off from the real time at which the event
+happened. Take this into consideration when using `schedule_in` from tasks triggered by events.
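+
+For example, a minimal sketch of an event task using `schedule_in` (assuming a button bound to the
+`EXTI0` interrupt, a hypothetical `debounce` task and that the `exti0` task declares `schedule_in:
+[debounce]` in `app!`):
+
+``` rust
+// number of clock cycles equivalent to 10 milliseconds (8 MHz HCLK assumed)
+const DEBOUNCE: u32 = 80_000;
+
+// event task bound to the `EXTI0` interrupt (button press)
+fn exti0(ctxt: exti0::Context) {
+    let t = &mut ctxt.threshold;
+
+    // the scheduled start of `exti0` is *estimated* with `rtfm::now()`; if this
+    // task was delayed by higher priority tasks the estimate -- and therefore
+    // the scheduled start of `debounce` -- will be off by the same amount
+    ctxt.tasks.debounce.schedule_in(t, DEBOUNCE, ()).unwrap();
+}
+```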
diff --git a/book/src/user/pools.md b/book/src/user/pools.md
new file mode 100644
index 0000000000..7e455616e2
--- /dev/null
+++ b/book/src/user/pools.md
@@ -0,0 +1,127 @@
+# Object pools
+
+Let's revisit the message passing example from a few sections ago and make it more efficient using
+object pools.
+
+`heapless` provides an object pool abstraction named `Pool` that uses *singleton* buffers. A
+singleton buffer is statically allocated and represented by a singleton type, a type of which only
+one instance can ever exist. Normally, `Pool` is `unsafe` to use because the user has to enforce
+the singleton requirement of the buffer. RTFM makes `Pool` safe by enforcing the singleton property
+of buffers. RTFM accomplishes this by turning all uninitialized resources of array type assigned to
+`init` into singleton buffers.
+
+``` rust
+#![feature(proc_macro)]
+#![no_std]
+
+extern crate cortex_m_rtfm;
+extern crate heapless;
+
+use cortex_m_rtfm::app;
+use heapless::Vec;
+use heapless::consts::*;
+use heapless::pool::{Object, Pool, Uninit};
+
+app! {
+    device: stm32f103xx,
+
+    resources: {
+        static BUFFER: Option<Object<V>> = None;
+
+        // memory for the `POOL`
+        static V: [Vec<u8, U128>; 2];
+        static POOL: Pool<V>;
+        // ..
+    },
+
+    init: {
+        resources: [V],
+    },
+
+    tasks: {
+        usart1: {
+            interrupt: USART1,
+
+            priority: 2,
+
+            resources: [BUFFER, POOL, SERIAL],
+        },
+
+        process: {
+            input: Object<V>,
+
+            // priority: 1,
+
+            // `POOL` is shared with the `usart1` task
+            resources: [POOL],
+        },
+    },
+}
+
+fn init(ctxt: init::Context) -> init::LateResources {
+    // ..
+
+    let v: Uninit<V> = ctxt.resources.V;
+
+    init::LateResources {
+        POOL: Pool::new(v),
+    }
+}
+
+fn usart1(ctxt: usart1::Context) {
+    const FRAME_DELIMITER: u8 = b'\n';
+
+    let t = &mut ctxt.threshold;
+    let tasks = ctxt.tasks;
+
+    let rbuffer: &mut _ = ctxt.resources.BUFFER;
+    let pool: &mut _ = ctxt.resources.POOL.borrow_mut(t);
+    let serial: &mut _ = ctxt.resources.SERIAL;
+
+    if rbuffer.is_none() {
+        // grab a buffer from the pool
+        *rbuffer = Some(pool.alloc().unwrap().init(Vec::new()));
+    }
+
+    let mut buffer = rbuffer.take().unwrap();
+
+    let byte = serial.read();
+
+    if byte == FRAME_DELIMITER {
+        // send the buffer to the `process` task
+        tasks.process.schedule_now(t, buffer).unwrap();
+    } else {
+        if buffer.push(byte).is_err() {
+            // omitted: error handling
+        }
+
+        *rbuffer = Some(buffer);
+    }
+}
+
+fn process(ctxt: process::Context) {
+    let buffer = ctxt.input;
+
+    // process buffer
+    match &buffer[..] {
+         "command1" => /* .. */,
+         "command2" => /* .. */,
+         // ..
+         _ => /* .. */,
+    }
+
+    // return the buffer to the pool
+    let t = &mut ctxt.threshold;
+    ctxt.resources.POOL.claim_mut(t, |pool, _| pool.dealloc(buffer));
+}
+```
+
+In this new version we use an object `Pool` that contains two instances of `Vec<u8, U128>`. The
+`usart1` task will fill one of the vectors in the `Pool` with data until it finds the frame
+delimiter. Once a frame is completed it will send the frame as an `Object` to the `process` task.
+Unlike the previous version, the `Object` value is very cheap to send (move): it's just a single
+byte in size. In the next iteration `usart1` will grab a fresh, different vector from the `Pool` and
+repeat the process.
+
+Once the `process` task is done processing the buffer it will proceed to return it to the object
+`Pool`.
diff --git a/book/src/user/scheduling.md b/book/src/user/scheduling.md
new file mode 100644
index 0000000000..0256efb6a0
--- /dev/null
+++ b/book/src/user/scheduling.md
@@ -0,0 +1,126 @@
+# Priority based scheduling
+
+We have talked about tasks but we have glossed over how they are scheduled. RTFM uses a priority
+based scheduler: tasks with higher priority will preempt lower priority ones. Once a task starts it
+runs to completion and will only be suspended if a higher priority task needs to be executed, but
+once the higher priority task finishes the lower priority one resumes execution.
+
+Let's illustrate how scheduling works with an example:
+
+``` rust
+#![feature(proc_macro)]
+#![no_std]
+
+extern crate cortex_m_rtfm;
+
+use cortex_m_rtfm::app;
+
+app! {
+    device: stm32f103xx,
+
+    init: {
+        schedule_now: [a],
+    },
+
+    tasks: {
+        a: {
+            // priority: 1,
+            schedule_now: [c],
+        },
+        b: {
+            priority: 2,
+        },
+        c: {
+            priority: 3,
+            schedule_now: [b],
+        },
+    },
+}
+
+fn init(ctxt: init::Context) -> init::LateResources {
+    let t = &mut ctxt.threshold;
+
+    println!("IN1");
+
+    ctxt.tasks.a.schedule_now(t, ());
+
+    println!("IN2");
+
+    init::LateResources {}
+}
+
+fn idle(ctxt: idle::Context) -> ! {
+    println!("ID");
+
+    loop {
+        // ..
+    }
+}
+
+fn a(ctxt: a::Context) {
+    let t = &mut ctxt.threshold;
+
+    println!("A1");
+
+    ctxt.tasks.c.schedule_now(t, ());
+
+    println!("A2");
+}
+
+fn b(ctxt: b::Context) {
+    let t = &mut ctxt.threshold;
+
+    println!("B");
+}
+
+fn c(ctxt: c::Context) {
+    let t = &mut ctxt.threshold;
+
+    println!("C1");
+
+    ctxt.tasks.b.schedule_now(t, ());
+
+    println!("C2");
+}
+```
+
+This program prints:
+
+``` text
+IN1
+IN2
+A1
+C1
+C2
+B
+A2
+ID
+```
+
+The RTFM scheduler is actually hardware based and built on top of the NVIC (Nested Vectored Interrupt
+Controller) peripheral and the interrupt mechanism of the Cortex-M architecture so tasks can't
+run while the interrupts are disabled. Thus tasks scheduled during `init` won't run until *after*
+`init` ends regardless of their priority.
+
+The program execution goes like this:
+
+- `init` prints "IN1". Then task `a` is scheduled to run immediately but nothing happens because
+  interrupts are disabled. `init` prints "IN2".
+
+- `init` ends and now tasks can run. Task `a` preempts `idle`, which runs after `init`. `idle` is
+  not a task per se because it never ends, but it has the lowest priority (priority = 0) so all
+  tasks can preempt it -- every task has a priority of 1 or higher.
+
+- Task `a` prints "A1" and then schedules task `c` to run immediately. Because task `c` has higher
+  priority than task `a` it preempts `a`.
+
+- Task `c` starts and prints "C1". Then it schedules task `b` to run immediately. Because task `b`
+  has lower priority than `c` it gets postponed. Task `c` prints "C2" and returns.
+
+- After task `c` ends, task `a` should resume, but task `b` is pending and has higher priority, so
+  task `b` preempts `a`. Task `b` prints "B" and ends.
+
+- Task `a` is finally resumed. Task `a` prints "A2" and returns.
+
+- After task `a` ends there's no task pending execution so `idle` is resumed. `idle` prints "ID"
+  and then enters its infinite `loop`.
diff --git a/book/src/user/sharing.md b/book/src/user/sharing.md
new file mode 100644
index 0000000000..e872b59ded
--- /dev/null
+++ b/book/src/user/sharing.md
@@ -0,0 +1,194 @@
+# Resource sharing
+
+We mentioned that in RTFM message passing is preferred over sharing state, but sometimes the need
+for shared state arises, so let's look at an example.
+
+Let's say we have an application with three tasks: one reads data from an accelerometer, another
+reads data from a gyroscope and the last one processes both the accelerometer and gyroscope data.
+The first two tasks run periodically at 1 kHz (one thousand times per second); the third task must
+start after the other two are done and consumes the data they produce. Here's one way to implement
+such a program:
+
+``` rust
+#![feature(proc_macro)]
+#![no_std]
+
+extern crate cortex_m_rtfm;
+
+use cortex_m_rtfm::app;
+
+#[derive(Clone, Copy)]
+struct Acceleration { x: u16, y: u16, z: u16 }
+
+#[derive(Clone, Copy)]
+struct AngularRate { x: u16, y: u16, z: u16 }
+
+enum Data {
+    Empty,
+    Acceleration(Acceleration),
+    AngularRate(AngularRate),
+}
+
+app! {
+    device: stm32f103xx,
+
+    resources: {
+        static DATA: Data = Data::Empty;
+
+        // omitted: other resources
+    },
+
+    tasks: {
+        accelerometer: {
+            resources: [ACCELEROMETER, DATA],
+
+            schedule_now: [process],
+
+            // priority: 1,
+
+            // omitted: interrupt source
+        },
+
+        gyroscope: {
+            resources: [GYROSCOPE, DATA],
+
+            schedule_now: [process],
+
+            // priority: 1,
+
+            // omitted: interrupt source
+        },
+
+        process: {
+            input: (Acceleration, AngularRate),
+        },
+    }
+}
+
+// omitted: `init`, `idle` and `process`
+
+fn accelerometer(mut ctxt: accelerometer::Context) {
+    let accelerometer = ctxt.resources.ACCELEROMETER;
+    let acceleration = accelerometer.read();
+
+    let t = &mut ctxt.threshold;
+
+    let angular_rate = {
+        let data: &mut Data = ctxt.resources.DATA.borrow_mut(t);
+
+        match *data {
+            // store data
+            Data::Empty => {
+                *data = Data::Acceleration(acceleration);
+                None
+            },
+
+            // overwrite old data
+            Data::Acceleration(..) => {
+                *data = Data::Acceleration(acceleration);
+                None
+            },
+
+            // data pair is ready
+            Data::AngularRate(angular_rate) => {
+                *data = Data::Empty;
+                Some(angular_rate)
+            },
+        }
+    };
+
+    if let Some(angular_rate) = angular_rate {
+        ctxt.tasks.process.schedule_now(t, (acceleration, angular_rate)).unwrap();
+    }
+}
+
+fn gyroscope(mut ctxt: gyroscope::Context) {
+    let gyroscope = ctxt.resources.GYROSCOPE;
+    let angular_rate = gyroscope.read();
+
+    let t = &mut ctxt.threshold;
+
+    let acceleration = {
+        let data = ctxt.resources.DATA.borrow_mut(t);
+
+        match *data {
+            // store data
+            Data::Empty => {
+                *data = Data::AngularRate(angular_rate);
+                None
+            },
+
+            // data pair is ready
+            Data::Acceleration(acceleration) => {
+                *data = Data::Empty;
+                Some(acceleration)
+            },
+
+            // overwrite old data
+            Data::AngularRate(..) => {
+                *data = Data::AngularRate(angular_rate);
+                None
+            },
+        }
+    };
+
+    if let Some(acceleration) = acceleration {
+        ctxt.tasks.process.schedule_now(t, (acceleration, angular_rate)).unwrap();
+    }
+}
+```
+
+In this program the tasks `accelerometer` and `gyroscope` share the `DATA` resource. This resource
+can contain either sensor's reading or no data at all. The idea is that either sensor task can
+start the `process` task but only the one that ends up with both readings will do so. That's where
+`DATA` comes in: if the `accelerometer` task happens first it stores its reading into `DATA`; then
+when the `gyroscope` task occurs it *takes* the accelerometer reading from `DATA`, leaving it
+empty, and schedules the `process` task passing both readings. This setup also supports the other
+scenario where the `gyroscope` task starts before the `accelerometer` task.
+
+In this particular case both sensor tasks operate at the same priority so they can't preempt each
+other: if both tasks need to run at about the same time one will run *after* the other. Without
+preemption a data race is not possible so each task can directly borrow (`borrow` / `borrow_mut`)
+the contents of `DATA`.
+
+## `claim*`
+
+If, instead, the sensor tasks had different priorities then the lower priority task would need to
+*claim* (`claim` / `claim_mut`) the resource. `claim*` creates a critical section and grants access
+to the contents of the resource for the span of the critical section; while the critical section is
+active the other tasks that share the resource can't start, so they never observe `DATA` in an
+inconsistent state. To illustrate let's increase the priority of `accelerometer` to 2; `gyroscope`
+would then have to access `DATA` like this:
+
+``` rust
+fn gyroscope(mut ctxt: gyroscope::Context) {
+    let gyroscope = ctxt.resources.GYROSCOPE;
+    let angular_rate = gyroscope.read();
+
+    let t = &mut ctxt.threshold;
+
+    let acceleration = ctxt.resources.DATA.claim_mut(t, |data: &mut Data, _| {
+        // start of critical section
+        match *data {
+            // store data
+            Data::Empty => {
+                *data = Data::AngularRate(angular_rate);
+                None
+            },
+
+            // data pair is ready
+            Data::Acceleration(acceleration) => {
+                *data = Data::Empty;
+                Some(acceleration)
+            },
+
+            // overwrite old data
+            Data::AngularRate(..) => {
+                *data = Data::AngularRate(angular_rate);
+                None
+            },
+        }
+        // end of critical section
+    });
+
+    if let Some(acceleration) = acceleration {
+        ctxt.tasks.process.schedule_now(t, (acceleration, angular_rate)).unwrap();
+    }
+}
+```
diff --git a/book/src/user/state.md b/book/src/user/state.md
new file mode 100644
index 0000000000..41ef9d3f0c
--- /dev/null
+++ b/book/src/user/state.md
@@ -0,0 +1,48 @@
+# Adding state
+
+Tasks are stateless by default; state can be added by assigning them *resources*. Resources are
+`static` variables declared in the `app!` macro that can be assigned to tasks. If a resource is
+assigned to a single task then it's *owned* by that task and the task has exclusive access to it. A
+resource can also be *shared* by two or more tasks; a shared resource must be `claim`ed (which may
+involve a lock) before its data can be accessed -- this prevents data races. In RTFM message
+passing (more on that later) is preferred over sharing state.
+
+The following example shows how to assign a resource to a task to preserve state across the
+different invocations of the task.
+
+``` rust
+#![feature(proc_macro)]
+#![no_std]
+
+extern crate cortex_m_rtfm;
+
+use cortex_m_rtfm::app;
+
+app! {
+    device: stm32f103xx,
+
+    // declare resources
+    resources: {
+        // number of times the user pressed the button
+        static PRESSES: u32 = 0;
+    },
+
+    tasks: {
+        exti0: {
+            interrupt: EXTI0,
+
+            // assign the `PRESSES` resource to the `exti0` task
+            resources: [PRESSES],
+        },
+    },
+}
+
+// omitted: `init` and `idle`
+
+fn exti0(ctxt: exti0::Context) {
+    let presses: &mut u32 = ctxt.resources.PRESSES;
+    *presses += 1;
+
+    println!("Button pressed {} times!", *presses);
+}
+```
diff --git a/ci/script.sh b/ci/script.sh
index 1e0585a741..a410bd06cf 100644
--- a/ci/script.sh
+++ b/ci/script.sh
@@ -9,29 +9,43 @@ main() {
 
     # examples that don't require the timer-queue feature
     local examples=(
-        async
+        schedule-now
         empty
-        interrupt
+        event-task
     )
 
+    # without timer-queue
+    cargo check --target $TARGET
+
+    for ex in ${examples[@]}; do
+        cargo build --target $TARGET --example $ex
+        cargo build --target $TARGET --example $ex --release
+    done
+
+    # with timer-queue
+    cargo check --features timer-queue --target $TARGET
+
+    cargo build --features timer-queue --target $TARGET --examples
+    cargo build --features timer-queue --target $TARGET --examples --release
+
+    # test again but with the cm7-r0p1 feature enabled
     case $TARGET in
         thumbv7em-none-eabi*)
+            # without timer-queue
             cargo check --target $TARGET --features cm7-r0p1
+
             for ex in ${examples[@]}; do
-                cargo check --target $TARGET --features cm7-r0p1 --example $ex
+                cargo build --target $TARGET --features cm7-r0p1 --example $ex
+                cargo build --target $TARGET --features cm7-r0p1 --example $ex --release
             done
 
-            cargo check timer-queue --target $TARGET --features "cm7-r0p1 timer-queue"
-            cargo check --target $TARGET --features "cm7-r0p1 timer-queue" --examples
-        ;;
-    esac
+            # with timer-queue
+            cargo check --target $TARGET --features "cm7-r0p1 timer-queue"
 
-    cargo check --target $TARGET
-    for ex in ${examples[@]}; do
-        cargo check --target $TARGET --features cm7-r0p1 --example $ex
-    done
-    cargo check --features timer-queue --target $TARGET
-    cargo check --features timer-queue --target $TARGET --examples
+            cargo build --target $TARGET --features "cm7-r0p1 timer-queue" --examples
+            cargo build --target $TARGET --features "cm7-r0p1 timer-queue" --examples --release
+            ;;
+    esac
 }
 
 main
diff --git a/examples/empty.rs b/examples/empty.rs
index 85ec24ce8d..7c7b0b823d 100644
--- a/examples/empty.rs
+++ b/examples/empty.rs
@@ -1,14 +1,18 @@
 #![deny(unsafe_code)]
 #![deny(warnings)]
 #![feature(proc_macro)]
+#![no_main]
 #![no_std]
 
 extern crate cortex_m;
+#[macro_use]
+extern crate cortex_m_rt;
 extern crate cortex_m_rtfm as rtfm;
-extern crate panic_abort;
+extern crate panic_semihosting;
 extern crate stm32f103xx;
 
 use cortex_m::asm;
+use cortex_m_rt::ExceptionFrame;
 use rtfm::app;
 
 app! {
@@ -26,3 +30,17 @@ fn idle(_ctxt: idle::Context) -> ! {
         asm::wfi();
     }
 }
+
+exception!(HardFault, hard_fault);
+
+#[inline(always)]
+fn hard_fault(ef: &ExceptionFrame) -> ! {
+    panic!("HardFault at {:#?}", ef);
+}
+
+exception!(*, default_handler);
+
+#[inline(always)]
+fn default_handler(irqn: i16) {
+    panic!("Unhandled exception (IRQn = {})", irqn);
+}
diff --git a/examples/interrupt.rs b/examples/event-task.rs
similarity index 61%
rename from examples/interrupt.rs
rename to examples/event-task.rs
index 6b0b3a670c..3a51c5b4f6 100644
--- a/examples/interrupt.rs
+++ b/examples/event-task.rs
@@ -1,14 +1,18 @@
 #![deny(unsafe_code)]
 #![deny(warnings)]
 #![feature(proc_macro)]
+#![no_main]
 #![no_std]
 
 extern crate cortex_m;
+#[macro_use]
+extern crate cortex_m_rt as rt;
 extern crate cortex_m_rtfm as rtfm;
 extern crate panic_abort;
 extern crate stm32f103xx;
 
 use cortex_m::asm;
+use rt::ExceptionFrame;
 use rtfm::app;
 
 app! {
@@ -34,3 +38,17 @@ fn idle(_ctxt: idle::Context) -> ! {
 }
 
 fn exti0(_ctxt: exti0::Context) {}
+
+exception!(HardFault, hard_fault);
+
+#[inline(always)]
+fn hard_fault(ef: &ExceptionFrame) -> ! {
+    panic!("HardFault at {:#?}", ef);
+}
+
+exception!(*, default_handler);
+
+#[inline(always)]
+fn default_handler(irqn: i16) {
+    panic!("Unhandled exception (IRQn = {})", irqn);
+}
diff --git a/examples/periodic-payload.rs b/examples/periodic-payload.rs
index 6a91e03063..9697934fd9 100644
--- a/examples/periodic-payload.rs
+++ b/examples/periodic-payload.rs
@@ -5,16 +5,16 @@
 // # -Os
 //
 // init
-// a(bl=8000000, now=8000180, input=0)
-// a(bl=16000000, now=16000180, input=1)
-// a(bl=24000000, now=24000180, input=2)
+// a(st=8000000, now=8000180, input=0)
+// a(st=16000000, now=16000180, input=1)
+// a(st=24000000, now=24000180, input=2)
 //
 // # -O3
 //
 // init
-// a(bl=8000000, now=8000168, input=0)
-// a(bl=16000000, now=16000168, input=1)
-// a(bl=24000000, now=24000168, input=2)
+// a(st=8000000, now=8000168, input=0)
+// a(st=16000000, now=16000168, input=1)
+// a(st=24000000, now=24000168, input=2)
 //
 // # Indices (new)
 //
@@ -23,28 +23,32 @@
 // ## -O3
 //
 // init
-// a(bl=8000000, now=8000164, input=0)
-// a(bl=16000000, now=16000164, input=1)
+// a(st=8000000, now=8000164, input=0)
+// a(st=16000000, now=16000164, input=1)
 //
 // ## -Os
 //
 // init
-// a(bl=8000000, now=8000179, input=0)
-// a(bl=16000000, now=16000179, input=1)
+// a(st=8000000, now=8000179, input=0)
+// a(st=16000000, now=16000179, input=1)
 
 #![deny(unsafe_code)]
 #![deny(warnings)]
 #![feature(proc_macro)]
+#![no_main]
 #![no_std]
 
 #[macro_use]
 extern crate cortex_m;
+#[macro_use]
+extern crate cortex_m_rt as rt;
 extern crate cortex_m_rtfm as rtfm;
 extern crate panic_abort;
 extern crate stm32f103xx;
 
 use cortex_m::asm;
 use cortex_m::peripheral::{DWT, ITM};
+use rt::ExceptionFrame;
 use rtfm::app;
 
 app! {
@@ -55,14 +59,14 @@ app! {
     },
 
     init: {
-        async_after: [a],
+        schedule_now: [a],
     },
 
     free_interrupts: [EXTI0],
 
     tasks: {
         a: {
-            async_after: [a],
+            schedule_after: [a],
             input: u16,
             resources: [ITM],
         },
@@ -76,7 +80,7 @@ const S: u32 = 1_000 * MS;
 fn init(mut ctxt: init::Context) -> init::LateResources {
     iprintln!(&mut ctxt.core.ITM.stim[0], "init");
 
-    ctxt.async.a.post(&mut ctxt.threshold, 1 * S, 0).ok();
+    ctxt.tasks.a.schedule_now(&mut ctxt.priority, 0).ok();
 
     init::LateResources { ITM: ctxt.core.ITM }
 }
@@ -92,18 +96,32 @@ fn a(mut ctxt: a::Context) {
     let now = DWT::get_cycle_count();
     let input = ctxt.input;
 
-    let bl = ctxt.baseline;
+    let st = ctxt.scheduled_time;
     let itm = ctxt.resources.ITM;
     iprintln!(
         &mut itm.stim[0],
-        "a(bl={}, now={}, input={})",
-        bl,
+        "a(st={}, now={}, input={})",
+        st,
         now,
         input
     );
 
-    ctxt.async
+    ctxt.tasks
         .a
-        .post(&mut ctxt.threshold, 1 * S, input + 1)
+        .schedule_after(&mut ctxt.priority, 1 * S, input + 1)
         .ok();
 }
+
+exception!(HardFault, hard_fault);
+
+#[inline(always)]
+fn hard_fault(ef: &ExceptionFrame) -> ! {
+    panic!("HardFault at {:#?}", ef);
+}
+
+exception!(*, default_handler);
+
+#[inline(always)]
+fn default_handler(irqn: i16) {
+    panic!("Unhandled exception (IRQn = {})", irqn);
+}
diff --git a/examples/periodic-preemption-payload.rs b/examples/periodic-preemption-payload.rs
index fbf367badd..0207b4546e 100644
--- a/examples/periodic-preemption-payload.rs
+++ b/examples/periodic-preemption-payload.rs
@@ -4,30 +4,30 @@
 //
 // ## -Os
 //
-// a(bl=16000000, now=16000248, input=0)
-// b(bl=24000000, now=24000251, input=0)
-// a(bl=32000000, now=32000248, input=1)
-// b(bl=48000000, now=48000283, input=1)
-// a(bl=48000000, now=48002427, input=2)
-// a(bl=64000000, now=64000248, input=3)
-// b(bl=72000000, now=72000251, input=2)
-// a(bl=80000000, now=80000248, input=4)
-// b(bl=96000000, now=96000283, input=3)
-// a(bl=96000000, now=96002427, input=5)
+// a(st=16000000, now=16000248, input=0)
+// b(st=24000000, now=24000251, input=0)
+// a(st=32000000, now=32000248, input=1)
+// b(st=48000000, now=48000283, input=1)
+// a(st=48000000, now=48002427, input=2)
+// a(st=64000000, now=64000248, input=3)
+// b(st=72000000, now=72000251, input=2)
+// a(st=80000000, now=80000248, input=4)
+// b(st=96000000, now=96000283, input=3)
+// a(st=96000000, now=96002427, input=5)
 //
 // ## -O3
 //
 // init
-// a(bl=16000000, now=16000231, input=0)
-// b(bl=24000000, now=24000230, input=0)
-// a(bl=32000000, now=32000231, input=1)
-// b(bl=48000000, now=48000259, input=1)
-// a(bl=48000000, now=48002397, input=2)
-// a(bl=64000000, now=64000231, input=3)
-// b(bl=72000000, now=72000230, input=2)
-// a(bl=80000000, now=80000231, input=4)
-// b(bl=96000000, now=96000259, input=3)
-// a(bl=96000000, now=96002397, input=5)
+// a(st=16000000, now=16000231, input=0)
+// b(st=24000000, now=24000230, input=0)
+// a(st=32000000, now=32000231, input=1)
+// b(st=48000000, now=48000259, input=1)
+// a(st=48000000, now=48002397, input=2)
+// a(st=64000000, now=64000231, input=3)
+// b(st=72000000, now=72000230, input=2)
+// a(st=80000000, now=80000231, input=4)
+// b(st=96000000, now=96000259, input=3)
+// a(st=96000000, now=96002397, input=5)
 //
 // # Indices (new)
 //
@@ -36,44 +36,48 @@
 // ## -O3
 //
 // init
-// a(bl=16000000, now=16000193, input=0)
-// b(bl=24000000, now=24000196, input=0)
-// a(bl=32000000, now=32000193, input=1)
-// b(bl=48000000, now=48000225, input=1)
-// a(bl=48000000, now=48001958, input=2)
-// a(bl=64000000, now=64000193, input=3)
-// b(bl=72000000, now=72000196, input=2)
-// a(bl=80000000, now=80000193, input=4)
-// b(bl=96000000, now=96000225, input=3)
-// a(bl=96000000, now=96001958, input=5)
+// a(st=16000000, now=16000193, input=0)
+// b(st=24000000, now=24000196, input=0)
+// a(st=32000000, now=32000193, input=1)
+// b(st=48000000, now=48000225, input=1)
+// a(st=48000000, now=48001958, input=2)
+// a(st=64000000, now=64000193, input=3)
+// b(st=72000000, now=72000196, input=2)
+// a(st=80000000, now=80000193, input=4)
+// b(st=96000000, now=96000225, input=3)
+// a(st=96000000, now=96001958, input=5)
 //
 // ## -Os
 //
 // init
-// a(bl=16000000, now=16000257, input=0)
-// b(bl=24000000, now=24000252, input=0)
-// a(bl=32000000, now=32000257, input=1)
-// b(bl=48000000, now=48000284, input=1)
-// a(bl=48000000, now=48002326, input=2)
-// a(bl=64000000, now=64000257, input=3)
-// b(bl=72000000, now=72000252, input=2)
-// a(bl=80000000, now=80000257, input=4)
-// b(bl=96000000, now=96000284, input=3)
-// a(bl=96000000, now=96002326, input=5)
+// a(st=16000000, now=16000257, input=0)
+// b(st=24000000, now=24000252, input=0)
+// a(st=32000000, now=32000257, input=1)
+// b(st=48000000, now=48000284, input=1)
+// a(st=48000000, now=48002326, input=2)
+// a(st=64000000, now=64000257, input=3)
+// b(st=72000000, now=72000252, input=2)
+// a(st=80000000, now=80000257, input=4)
+// b(st=96000000, now=96000284, input=3)
+// a(st=96000000, now=96002326, input=5)
 
 #![deny(unsafe_code)]
 #![deny(warnings)]
 #![feature(proc_macro)]
+#![no_main]
 #![no_std]
 
 #[macro_use]
 extern crate cortex_m;
+#[macro_use]
+extern crate cortex_m_rt as rt;
 extern crate cortex_m_rtfm as rtfm;
 extern crate panic_abort;
 extern crate stm32f103xx;
 
 use cortex_m::asm;
 use cortex_m::peripheral::{DWT, ITM};
+use rt::ExceptionFrame;
 use rtfm::{app, Resource};
 
 app! {
@@ -84,20 +88,20 @@ app! {
     },
 
     init: {
-        async_after: [a, b],
+        schedule_now: [a, b],
     },
 
     free_interrupts: [EXTI0, EXTI1],
 
     tasks: {
         a: {
-            async_after: [a],
+            schedule_after: [a],
             input: u16,
             resources: [ITM],
         },
 
         b: {
-            async_after: [b],
+            schedule_after: [b],
             input: u16,
             priority: 2,
             resources: [ITM],
@@ -112,8 +116,8 @@ const S: u32 = 1_000 * MS;
 fn init(mut ctxt: init::Context) -> init::LateResources {
     iprintln!(&mut ctxt.core.ITM.stim[0], "init");
 
-    ctxt.async.a.post(&mut ctxt.threshold, 2 * S, 0).ok();
-    ctxt.async.b.post(&mut ctxt.threshold, 3 * S, 0).ok();
+    ctxt.tasks.a.schedule_now(&mut ctxt.priority, 0).ok();
+    ctxt.tasks.b.schedule_now(&mut ctxt.priority, 0).ok();
 
     init::LateResources { ITM: ctxt.core.ITM }
 }
@@ -129,36 +133,50 @@ fn a(mut ctxt: a::Context) {
     let now = DWT::get_cycle_count();
 
     let input = ctxt.input;
-    let bl = ctxt.baseline;
-    ctxt.resources.ITM.claim_mut(&mut ctxt.threshold, |itm, _| {
+    let st = ctxt.scheduled_time;
+    ctxt.resources.ITM.claim_mut(&mut ctxt.priority, |itm, _| {
         iprintln!(
             &mut itm.stim[0],
-            "a(bl={}, now={}, input={})",
-            bl,
+            "a(st={}, now={}, input={})",
+            st,
             now,
             input
         );
     });
 
-    ctxt.async
+    ctxt.tasks
         .a
-        .post(&mut ctxt.threshold, 2 * S, input + 1)
+        .schedule_after(&mut ctxt.priority, 2 * S, input + 1)
         .ok();
 }
 
 fn b(mut ctxt: b::Context) {
     let now = DWT::get_cycle_count();
 
-    let bl = ctxt.baseline;
+    let st = ctxt.scheduled_time;
     let input = ctxt.input;
-    let t = &mut ctxt.threshold;
+    let t = &mut ctxt.priority;
     iprintln!(
         &mut ctxt.resources.ITM.borrow_mut(t).stim[0],
-        "b(bl={}, now={}, input={})",
-        bl,
+        "b(st={}, now={}, input={})",
+        st,
         now,
         input,
     );
 
-    ctxt.async.b.post(t, 3 * S, input + 1).ok();
+    ctxt.tasks.b.schedule_after(t, 3 * S, input + 1).ok();
+}
+
+exception!(HardFault, hard_fault);
+
+#[inline(always)]
+fn hard_fault(ef: &ExceptionFrame) -> ! {
+    panic!("HardFault at {:#?}", ef);
+}
+
+exception!(*, default_handler);
+
+#[inline(always)]
+fn default_handler(irqn: i16) {
+    panic!("Unhandled exception (IRQn = {})", irqn);
 }
diff --git a/examples/periodic-preemption.rs b/examples/periodic-preemption.rs
index a458d2bfc7..e1e450e69c 100644
--- a/examples/periodic-preemption.rs
+++ b/examples/periodic-preemption.rs
@@ -65,16 +65,20 @@
 #![deny(unsafe_code)]
 #![deny(warnings)]
 #![feature(proc_macro)]
+#![no_main]
 #![no_std]
 
 #[macro_use]
 extern crate cortex_m;
+#[macro_use]
+extern crate cortex_m_rt as rt;
 extern crate cortex_m_rtfm as rtfm;
 extern crate panic_abort;
 extern crate stm32f103xx;
 
 use cortex_m::asm;
 use cortex_m::peripheral::{DWT, ITM};
+use rt::ExceptionFrame;
 use rtfm::{app, Resource};
 
 app! {
@@ -85,19 +89,19 @@ app! {
     },
 
     init: {
-        async_after: [a, b],
+        schedule_now: [a, b],
     },
 
     free_interrupts: [EXTI0, EXTI1],
 
     tasks: {
         a: {
-            async_after: [a],
+            schedule_after: [a],
             resources: [ITM],
         },
 
         b: {
-            async_after: [b],
+            schedule_after: [b],
             priority: 2,
             resources: [ITM],
         },
@@ -111,8 +115,8 @@ const S: u32 = 1_000 * MS;
 fn init(mut ctxt: init::Context) -> init::LateResources {
     iprintln!(&mut ctxt.core.ITM.stim[0], "init");
 
-    ctxt.async.a.post(&mut ctxt.threshold, 2 * S, ()).ok();
-    ctxt.async.b.post(&mut ctxt.threshold, 3 * S, ()).ok();
+    ctxt.tasks.a.schedule_now(&mut ctxt.priority).ok();
+    ctxt.tasks.b.schedule_now(&mut ctxt.priority).ok();
 
     init::LateResources { ITM: ctxt.core.ITM }
 }
@@ -127,25 +131,39 @@ fn idle(_ctxt: idle::Context) -> ! {
 fn a(mut ctxt: a::Context) {
     let now = DWT::get_cycle_count();
 
-    let bl = ctxt.baseline;
-    ctxt.resources.ITM.claim_mut(&mut ctxt.threshold, |itm, _| {
-        iprintln!(&mut itm.stim[0], "a(bl={}, now={})", bl, now);
+    let st = ctxt.scheduled_time;
+    ctxt.resources.ITM.claim_mut(&mut ctxt.priority, |itm, _| {
+        iprintln!(&mut itm.stim[0], "a(st={}, now={})", st, now);
     });
 
-    ctxt.async.a.post(&mut ctxt.threshold, 2 * S, ()).ok();
+    ctxt.tasks.a.schedule_after(&mut ctxt.priority, 2 * S).ok();
 }
 
 fn b(mut ctxt: b::Context) {
     let now = DWT::get_cycle_count();
 
-    let bl = ctxt.baseline;
-    let t = &mut ctxt.threshold;
+    let st = ctxt.scheduled_time;
+    let t = &mut ctxt.priority;
     iprintln!(
         &mut ctxt.resources.ITM.borrow_mut(t).stim[0],
-        "b(bl={}, now={})",
-        bl,
+        "b(st={}, now={})",
+        st,
         now
     );
 
-    ctxt.async.b.post(t, 3 * S, ()).ok();
+    ctxt.tasks.b.schedule_after(t, 3 * S).ok();
+}
+
+exception!(HardFault, hard_fault);
+
+#[inline(always)]
+fn hard_fault(ef: &ExceptionFrame) -> ! {
+    panic!("HardFault at {:#?}", ef);
+}
+
+exception!(*, default_handler);
+
+#[inline(always)]
+fn default_handler(irqn: i16) {
+    panic!("Unhandled exception (IRQn = {})", irqn);
 }
diff --git a/examples/periodic.rs b/examples/periodic.rs
index ff5160349f..90b3abeba6 100644
--- a/examples/periodic.rs
+++ b/examples/periodic.rs
@@ -5,13 +5,13 @@
 // ## -Os
 //
 // init
-// a(bl=8000000, now=8000180)
-// a(bl=16000000, now=16000180)
+// a(st=8000000, now=8000180)
+// a(st=16000000, now=16000180)
 //
 // ## -O3
 //
-// a(bl=8000000, now=8000168)
-// a(bl=16000000, now=16000168)
+// a(st=8000000, now=8000168)
+// a(st=16000000, now=16000168)
 //
 // # Indices (new)
 //
@@ -20,29 +20,34 @@
 // ## -Os
 //
 // init
-// a(bl=8000000, now=8000176)
-// a(bl=16000000, now=16000176)
+// a(st=8000000, now=8000176)
+// a(st=16000000, now=16000176)
 //
 // ## -O3
 //
 // init
-// a(bl=0, now=68)
-// a(bl=8000000, now=8000165)
-// a(bl=16000000, now=16000165)
+// a(st=0, now=68)
+// a(st=8000000, now=8000165)
+// a(st=16000000, now=16000165)
 
 #![deny(unsafe_code)]
 #![deny(warnings)]
 #![feature(proc_macro)]
+#![no_main]
 #![no_std]
 
 #[macro_use]
 extern crate cortex_m;
+#[macro_use]
+extern crate cortex_m_rt as rt;
 extern crate cortex_m_rtfm as rtfm;
 extern crate panic_abort;
+// extern crate panic_itm;
 extern crate stm32f103xx;
 
 use cortex_m::asm;
 use cortex_m::peripheral::{DWT, ITM};
+use rt::ExceptionFrame;
 use rtfm::app;
 
 app! {
@@ -53,14 +58,14 @@ app! {
     },
 
     init: {
-        async: [a],
+        schedule_now: [a],
     },
 
     free_interrupts: [EXTI0],
 
     tasks: {
         a: {
-            async_after: [a],
+            schedule_after: [a],
             resources: [ITM],
         },
     },
@@ -73,7 +78,7 @@ const S: u32 = 1_000 * MS;
 fn init(mut ctxt: init::Context) -> init::LateResources {
     iprintln!(&mut ctxt.core.ITM.stim[0], "init");
 
-    ctxt.async.a.post(&mut ctxt.threshold, ()).ok();
+    ctxt.tasks.a.schedule_now(&mut ctxt.priority).ok();
 
     init::LateResources { ITM: ctxt.core.ITM }
 }
@@ -88,9 +93,23 @@ fn idle(_ctxt: idle::Context) -> ! {
 fn a(mut ctxt: a::Context) {
     let now = DWT::get_cycle_count();
 
-    let bl = ctxt.baseline;
+    let st = ctxt.scheduled_time;
     let itm = ctxt.resources.ITM;
-    iprintln!(&mut itm.stim[0], "a(bl={}, now={})", bl, now);
+    iprintln!(&mut itm.stim[0], "a(st={}, now={})", st, now);
 
-    ctxt.async.a.post(&mut ctxt.threshold, 1 * S, ()).ok();
+    ctxt.tasks.a.schedule_after(&mut ctxt.priority, 1 * S).ok();
+}
+
+exception!(HardFault, hard_fault);
+
+#[inline(always)]
+fn hard_fault(ef: &ExceptionFrame) -> ! {
+    panic!("HardFault at {:#?}", ef);
+}
+
+exception!(*, default_handler);
+
+#[inline(always)]
+fn default_handler(irqn: i16) {
+    panic!("Unhandled exception (IRQn = {})", irqn);
 }
diff --git a/examples/interrupt-async-after.rs b/examples/schedule-after-from-event-task.rs
similarity index 62%
rename from examples/interrupt-async-after.rs
rename to examples/schedule-after-from-event-task.rs
index 1e7a3ac0ab..2e666a7d56 100644
--- a/examples/interrupt-async-after.rs
+++ b/examples/schedule-after-from-event-task.rs
@@ -1,15 +1,19 @@
 #![deny(warnings)]
 #![feature(proc_macro)]
+#![no_main]
 #![no_std]
 
 #[macro_use]
 extern crate cortex_m;
+#[macro_use]
+extern crate cortex_m_rt as rt;
 extern crate cortex_m_rtfm as rtfm;
 extern crate panic_abort;
 extern crate stm32f103xx;
 
 use cortex_m::asm;
 use cortex_m::peripheral::{DWT, ITM};
+use rt::ExceptionFrame;
 use rtfm::app;
 
 app! {
@@ -24,7 +28,7 @@ app! {
     tasks: {
         exti0: {
             interrupt: EXTI0,
-            async_after: [a],
+            schedule_after: [a],
             resources: [ITM],
         },
 
@@ -38,7 +42,7 @@ const S: u32 = 8_000_000;
 
 #[inline(always)]
 fn init(ctxt: init::Context) -> init::LateResources {
-    unsafe { rtfm::set_pending(stm32f103xx::Interrupt::EXTI0) }
+    rtfm::_impl::trigger(stm32f103xx::Interrupt::EXTI0);
 
     init::LateResources { ITM: ctxt.core.ITM }
 }
@@ -54,21 +58,35 @@ fn exti0(mut ctxt: exti0::Context) {
     let now = DWT::get_cycle_count();
     iprintln!(
         &mut ctxt.resources.ITM.stim[0],
-        "exti0(bl={}, now={})",
-        ctxt.baseline,
+        "exti0(st={}, now={})",
+        ctxt.start_time,
         now
     );
 
-    let t = &mut ctxt.threshold;
-    ctxt.async.a.post(t, 1 * S, ()).ok();
+    let t = &mut ctxt.priority;
+    ctxt.tasks.a.schedule_after(t, 1 * S).ok();
 }
 
 fn a(ctxt: a::Context) {
     let now = DWT::get_cycle_count();
     iprintln!(
         &mut ctxt.resources.ITM.stim[0],
-        "a(bl={}, now={})",
-        ctxt.baseline,
+        "a(st={}, now={})",
+        ctxt.scheduled_time,
         now
     );
 }
+
+exception!(HardFault, hard_fault);
+
+#[inline(always)]
+fn hard_fault(ef: &ExceptionFrame) -> ! {
+    panic!("HardFault at {:#?}", ef);
+}
+
+exception!(*, default_handler);
+
+#[inline(always)]
+fn default_handler(irqn: i16) {
+    panic!("Unhandled exception (IRQn = {})", irqn);
+}
diff --git a/examples/async-after.rs b/examples/schedule-after.rs
similarity index 62%
rename from examples/async-after.rs
rename to examples/schedule-after.rs
index f96ceb8ed1..dc7946b9df 100644
--- a/examples/async-after.rs
+++ b/examples/schedule-after.rs
@@ -1,14 +1,18 @@
 #![deny(unsafe_code)]
 #![deny(warnings)]
 #![feature(proc_macro)]
+#![no_main]
 #![no_std]
 
 extern crate cortex_m;
+#[macro_use]
+extern crate cortex_m_rt as rt;
 extern crate cortex_m_rtfm as rtfm;
 extern crate panic_abort;
 extern crate stm32f103xx;
 
 use cortex_m::asm;
+use rt::ExceptionFrame;
 use rtfm::app;
 
 app! {
@@ -19,7 +23,7 @@ app! {
     tasks: {
         exti0: {
             interrupt: EXTI0,
-            async_after: [a],
+            schedule_after: [a],
         },
 
         a: {},
@@ -41,9 +45,21 @@ fn idle(_ctxt: idle::Context) -> ! {
 }
 
 fn exti0(mut ctxt: exti0::Context) {
-    ctxt.async.a.post(&mut ctxt.threshold, 1 * S, ()).ok();
+    ctxt.tasks.a.schedule_after(&mut ctxt.priority, 1 * S).ok();
 }
 
 fn a(_ctxt: a::Context) {
     asm::bkpt();
 }
+
+exception!(HardFault, hard_fault);
+
+fn hard_fault(ef: &ExceptionFrame) -> ! {
+    panic!("HardFault at {:#?}", ef);
+}
+
+exception!(*, default_handler);
+
+fn default_handler(irqn: i16) {
+    panic!("Unhandled exception (IRQn = {})", irqn);
+}
diff --git a/examples/interrupt-async.rs b/examples/schedule-now-from-event-task.rs
similarity index 62%
rename from examples/interrupt-async.rs
rename to examples/schedule-now-from-event-task.rs
index c3b6f3d2ae..98531ddfa3 100644
--- a/examples/interrupt-async.rs
+++ b/examples/schedule-now-from-event-task.rs
@@ -1,15 +1,19 @@
 #![deny(warnings)]
 #![feature(proc_macro)]
+#![no_main]
 #![no_std]
 
 #[macro_use]
 extern crate cortex_m;
+#[macro_use]
+extern crate cortex_m_rt as rt;
 extern crate cortex_m_rtfm as rtfm;
 extern crate panic_abort;
 extern crate stm32f103xx;
 
 use cortex_m::asm;
 use cortex_m::peripheral::{DWT, ITM};
+use rt::ExceptionFrame;
 use rtfm::app;
 
 app! {
@@ -24,7 +28,7 @@ app! {
     tasks: {
         exti0: {
             interrupt: EXTI0,
-            async: [a],
+            schedule_now: [a],
             resources: [ITM],
         },
 
@@ -36,7 +40,7 @@ app! {
 
 #[inline(always)]
 fn init(ctxt: init::Context) -> init::LateResources {
-    unsafe { rtfm::set_pending(stm32f103xx::Interrupt::EXTI0) }
+    rtfm::_impl::trigger(stm32f103xx::Interrupt::EXTI0);
 
     init::LateResources { ITM: ctxt.core.ITM }
 }
@@ -52,21 +56,35 @@ fn exti0(mut ctxt: exti0::Context) {
     let now = DWT::get_cycle_count();
     iprintln!(
         &mut ctxt.resources.ITM.stim[0],
-        "exti0(bl={}, now={})",
-        ctxt.baseline,
+        "exti0(st={}, now={})",
+        ctxt.start_time,
         now
     );
 
-    let t = &mut ctxt.threshold;
-    ctxt.async.a.post(t, ()).ok();
+    let t = &mut ctxt.priority;
+    ctxt.tasks.a.schedule_now(t).ok();
 }
 
 fn a(ctxt: a::Context) {
     let now = DWT::get_cycle_count();
     iprintln!(
         &mut ctxt.resources.ITM.stim[0],
-        "a(bl={}, now={})",
-        ctxt.baseline,
+        "a(st={}, now={})",
+        ctxt.scheduled_time,
         now
     );
 }
+
+exception!(HardFault, hard_fault);
+
+#[inline(always)]
+fn hard_fault(ef: &ExceptionFrame) -> ! {
+    panic!("HardFault at {:#?}", ef);
+}
+
+exception!(*, default_handler);
+
+#[inline(always)]
+fn default_handler(irqn: i16) {
+    panic!("Unhandled exception (IRQn = {})", irqn);
+}
diff --git a/examples/async.rs b/examples/schedule-now.rs
similarity index 54%
rename from examples/async.rs
rename to examples/schedule-now.rs
index 03435caa76..f16539b2b9 100644
--- a/examples/async.rs
+++ b/examples/schedule-now.rs
@@ -9,21 +9,25 @@
 #![deny(unsafe_code)]
 #![deny(warnings)]
 #![feature(proc_macro)]
+#![no_main]
 #![no_std]
 
 extern crate cortex_m;
+#[macro_use]
+extern crate cortex_m_rt as rt;
 extern crate cortex_m_rtfm as rtfm;
-extern crate panic_abort;
+extern crate panic_semihosting;
 extern crate stm32f103xx;
 
 use cortex_m::asm;
+use rt::ExceptionFrame;
 use rtfm::app;
 
 app! {
     device: stm32f103xx,
 
     init: {
-        async: [a],
+        schedule_now: [a],
     },
 
     free_interrupts: [EXTI1],
@@ -31,7 +35,7 @@ app! {
     tasks: {
         exti0: {
             interrupt: EXTI0,
-            async: [a],
+            schedule_now: [a],
         },
 
         a: {},
@@ -39,7 +43,9 @@ app! {
 }
 
 #[inline(always)]
-fn init(_ctxt: init::Context) -> init::LateResources {
+fn init(mut ctxt: init::Context) -> init::LateResources {
+    ctxt.tasks.a.schedule_now(&mut ctxt.priority).unwrap();
+
     init::LateResources {}
 }
 
@@ -51,9 +57,21 @@ fn idle(_ctxt: idle::Context) -> ! {
 }
 
 fn exti0(mut ctxt: exti0::Context) {
-    ctxt.async.a.post(&mut ctxt.threshold, ()).ok();
+    ctxt.tasks.a.schedule_now(&mut ctxt.priority).ok();
 }
 
 fn a(_ctxt: a::Context) {
     asm::bkpt();
 }
+
+exception!(HardFault, hard_fault);
+
+fn hard_fault(ef: &ExceptionFrame) -> ! {
+    panic!("HardFault at {:#?}", ef);
+}
+
+exception!(*, default_handler);
+
+fn default_handler(irqn: i16) {
+    panic!("Unhandled exception (IRQn = {})", irqn);
+}
diff --git a/examples/user-struct.rs b/examples/user-struct.rs
index 0ac49348db..2efb98f32e 100644
--- a/examples/user-struct.rs
+++ b/examples/user-struct.rs
@@ -1,14 +1,18 @@
 #![deny(unsafe_code)]
 #![deny(warnings)]
 #![feature(proc_macro)]
+#![no_main]
 #![no_std]
 
 extern crate cortex_m;
+#[macro_use]
+extern crate cortex_m_rt as rt;
 extern crate cortex_m_rtfm as rtfm;
 extern crate panic_abort;
 extern crate stm32f103xx;
 
 use cortex_m::asm;
+use rt::ExceptionFrame;
 use rtfm::app;
 
 pub struct Foo(u32);
@@ -24,8 +28,8 @@ app! {
     free_interrupts: [EXTI0],
 
     init: {
-        async: [a],
-        async_after: [b],
+        schedule_now: [a],
+        schedule_after: [b],
     },
 
     tasks: {
@@ -54,3 +58,17 @@ fn idle(_ctxt: idle::Context) -> ! {
 fn a(_ctxt: a::Context) {}
 
 fn b(_ctxt: b::Context) {}
+
+exception!(HardFault, hard_fault);
+
+#[inline(always)]
+fn hard_fault(ef: &ExceptionFrame) -> ! {
+    panic!("HardFault at {:#?}", ef);
+}
+
+exception!(*, default_handler);
+
+#[inline(always)]
+fn default_handler(irqn: i16) {
+    panic!("Unhandled exception (IRQn = {})", irqn);
+}
diff --git a/macros/Cargo.toml b/macros/Cargo.toml
index 4161ed3448..5cb64266f7 100644
--- a/macros/Cargo.toml
+++ b/macros/Cargo.toml
@@ -14,7 +14,7 @@ failure = "0.1.1"
 proc-macro2 = "0.3.6"
 quote = "0.5.1"
 # rtfm-syntax = "0.3.0"
-rtfm-syntax = { git = "https://github.com/japaric/rtfm-syntax", branch = "tq" }
+rtfm-syntax = { path = "../../rtfm-syntax" }
 syn = "0.13.1"
 either = "1.5.0"
 
diff --git a/macros/src/analyze.rs b/macros/src/analyze.rs
index 46dbea6e7d..c901da9ab2 100644
--- a/macros/src/analyze.rs
+++ b/macros/src/analyze.rs
@@ -6,50 +6,50 @@ use syn::{Ident, Type};
 use syntax::check::App;
 
 pub fn app(app: &App) -> Context {
-    let mut async = HashSet::new();
-    let mut async_after = HashSet::new();
+    let mut schedule_now = HashSet::new();
+    let mut schedule_after = HashSet::new();
     let mut dispatchers = HashMap::new();
     let mut triggers = HashMap::new();
     let mut tq = TimerQueue::new();
     let mut free_interrupts = app.free_interrupts.iter().cloned().collect::<Vec<_>>();
 
-    async.extend(&app.init.async);
+    schedule_now.extend(&app.init.schedule_now);
 
-    for task in &app.init.async_after {
-        async_after.insert(*task);
+    for task in &app.init.schedule_after {
+        schedule_after.insert(*task);
 
         // Timer queue
         if let Entry::Vacant(entry) = tq.tasks.entry(*task) {
-            tq.capacity += app.tasks[task].interrupt_or_capacity.right().unwrap();
+            tq.capacity += app.tasks[task].interrupt_or_instances.right().unwrap();
             entry.insert(app.tasks[task].priority);
         }
     }
 
     // compute dispatchers
     for (name, task) in &app.tasks {
-        match task.interrupt_or_capacity {
+        match task.interrupt_or_instances {
             Either::Left(interrupt) => {
                 triggers.insert(interrupt, (*name, task.priority));
             }
-            Either::Right(capacity) => {
+            Either::Right(instances) => {
                 let dispatcher = dispatchers.entry(task.priority).or_insert_with(|| {
                     Dispatcher::new(free_interrupts.pop().expect("not enough free interrupts"))
                 });
                 dispatcher.tasks.push(*name);
-                dispatcher.capacity += capacity;
+                dispatcher.capacity += instances;
             }
         }
 
-        for task in &task.async {
-            async.insert(*task);
+        for task in &task.schedule_now {
+            schedule_now.insert(*task);
         }
 
-        for task in &task.async_after {
-            async_after.insert(*task);
+        for task in &task.schedule_after {
+            schedule_after.insert(*task);
 
             // Timer queue
             if let Entry::Vacant(entry) = tq.tasks.entry(*task) {
-                tq.capacity += app.tasks[task].interrupt_or_capacity.right().unwrap();
+                tq.capacity += app.tasks[task].interrupt_or_instances.right().unwrap();
                 entry.insert(app.tasks[task].priority);
             }
         }
@@ -86,12 +86,15 @@ pub fn app(app: &App) -> Context {
         }
     }
 
-    // async
-    for (caller_priority, task) in app.tasks
-        .values()
-        .flat_map(|caller| caller.async.iter().map(move |task| (caller.priority, task)))
-    {
-        // async callers contend for the consumer end of the task slot queue (#task::SQ) and ...
+    // schedule_now
+    for (caller_priority, task) in app.tasks.values().flat_map(|caller| {
+        caller
+            .schedule_now
+            .iter()
+            .map(move |task| (caller.priority, task))
+    }) {
+        // schedule_now callers contend for the consumer end of the task slot queue (#task::SQ) and
+        // ..
         let ceiling = ceilings.slot_queues.entry(*task).or_insert(caller_priority);
 
         if caller_priority > *ceiling {
@@ -110,15 +113,15 @@ pub fn app(app: &App) -> Context {
         }
     }
 
-    // async_after
+    // schedule_after
     for (caller_priority, task) in app.tasks.values().flat_map(|caller| {
         caller
-            .async_after
+            .schedule_after
             .iter()
             .map(move |task| (caller.priority, task))
     }) {
-        // async_after callers contend for the consumer end of the task slot queue (#task::SQ) and
-        // ...
+        // schedule_after callers contend for the consumer end of the task slot queue (#task::SQ)
+        // and ..
         let ceiling = ceilings.slot_queues.entry(*task).or_insert(caller_priority);
 
         if caller_priority > *ceiling {
@@ -132,8 +135,8 @@ pub fn app(app: &App) -> Context {
     }
 
     Context {
-        async,
-        async_after,
+        schedule_now,
+        schedule_after,
         ceilings,
         dispatchers,
         sys_tick,
@@ -143,10 +146,10 @@ pub fn app(app: &App) -> Context {
 }
 
 pub struct Context {
-    // set of `async` tasks
-    pub async: HashSet<Ident>,
-    // set of `async_after` tasks
-    pub async_after: HashSet<Ident>,
+    // set of `schedule_now` tasks
+    pub schedule_now: HashSet<Ident>,
+    // set of `schedule_after` tasks
+    pub schedule_after: HashSet<Ident>,
     pub ceilings: Ceilings,
     // Priority:u8 -> Dispatcher
     pub dispatchers: HashMap<u8, Dispatcher>,
diff --git a/macros/src/check.rs b/macros/src/check.rs
index 300eac82c6..3094b4eed7 100644
--- a/macros/src/check.rs
+++ b/macros/src/check.rs
@@ -3,11 +3,13 @@ use syntax::Result;
 
 pub fn app(app: &App) -> Result<()> {
     if !cfg!(feature = "timer-queue") {
-        if !app.init.async_after.is_empty()
-            || app.tasks.values().any(|task| !task.async_after.is_empty())
+        if !app.init.schedule_after.is_empty()
+            || app.tasks
+                .values()
+                .any(|task| !task.schedule_after.is_empty())
         {
             return Err(format_err!(
-                "async_after is not supported. Enable the 'timer-queue' feature to use it"
+                "schedule_after is not supported. Enable the 'timer-queue' feature to use it"
             ));
         }
     }
diff --git a/macros/src/trans.rs b/macros/src/trans.rs
index 5294784c74..f8ee2999eb 100644
--- a/macros/src/trans.rs
+++ b/macros/src/trans.rs
@@ -8,20 +8,13 @@ use analyze::Context;
 
 pub fn app(ctxt: &Context, app: &App) -> Tokens {
     let mut root = vec![];
-    let krate = Ident::from("cortex_m_rtfm");
+    let k = Ident::from("_rtfm");
     let device = &app.device;
-    let hidden = Ident::from("__hidden");
 
-    let needs_tq = !ctxt.async_after.is_empty();
+    let needs_tq = !ctxt.schedule_after.is_empty();
 
-    /* root */
-    // NOTE we can't use paths like `#krate::foo` in the root because there's no guarantee that the
-    // user has not renamed `cortex_m_rtfm` (e.g. `extern crate cortex_m_rtfm as rtfm`) so instead
-    // we add this `#hidden` module and use `#hidden::#krate::foo` in the root.
     root.push(quote! {
-        mod #hidden {
-            pub extern crate #krate;
-        }
+        extern crate cortex_m_rtfm as #k;
     });
 
     /* Resources */
@@ -32,7 +25,7 @@ pub fn app(ctxt: &Context, app: &App) -> Tokens {
             .expr
             .as_ref()
             .map(|e| quote!(#e))
-            .unwrap_or_else(|| quote!(unsafe { #hidden::#krate::uninitialized() }));
+            .unwrap_or_else(|| quote!(unsafe { ::#k::_impl::uninitialized() }));
 
         let ceiling = Ident::from(format!(
             "U{}",
@@ -45,12 +38,12 @@ pub fn app(ctxt: &Context, app: &App) -> Tokens {
         ));
         root.push(quote! {
             #[allow(unsafe_code)]
-            unsafe impl #hidden::#krate::Resource for __resource::#name {
+            unsafe impl ::#k::Resource for _resource::#name {
                 const NVIC_PRIO_BITS: u8 = ::#device::NVIC_PRIO_BITS;
-                type Ceiling = #hidden::#krate::#ceiling;
+                type Ceiling = ::#k::_impl::#ceiling;
                 type Data = #ty;
 
-                unsafe fn get() -> &'static mut Self::Data {
+                unsafe fn _var() -> &'static mut Self::Data {
                     static mut #name: #ty = #expr;
 
                     &mut #name
@@ -72,9 +65,7 @@ pub fn app(ctxt: &Context, app: &App) -> Tokens {
     }
 
     root.push(quote! {
-        mod __resource {
-            extern crate #krate;
-
+        mod _resource {
             #[allow(unused_imports)]
             use core::marker::PhantomData;
 
@@ -85,7 +76,6 @@ pub fn app(ctxt: &Context, app: &App) -> Tokens {
     /* Tasks */
     for (name, task) in &app.tasks {
         let path = &task.path;
-        let input = &task.input;
 
         let lifetime = if task.resources
             .iter()
@@ -96,7 +86,7 @@ pub fn app(ctxt: &Context, app: &App) -> Tokens {
             None
         };
 
-        let __context = Ident::from(format!(
+        let _context = Ident::from(format!(
             "_ZN{}{}7ContextE",
             name.as_ref().as_bytes().len(),
             name
@@ -104,50 +94,61 @@ pub fn app(ctxt: &Context, app: &App) -> Tokens {
 
         let mut mod_ = vec![];
 
+        let time_field = if task.interrupt_or_instances.is_left() {
+            quote!(start_time)
+        } else {
+            quote!(scheduled_time)
+        };
+
+        let input_ = task.input
+            .as_ref()
+            .map(|input| quote!(#input))
+            .unwrap_or(quote!(()));
+
         // NOTE some stuff has to go in the root because `#input` is not guaranteed to be a
         // primitive type and there's no way to import that type into a module (we don't know its
         // full path). So instead we just assume that `#input` has been imported in the root; this
         // forces us to put anything that refers to `#input` in the root.
         if cfg!(feature = "timer-queue") {
             root.push(quote! {
-                pub struct #__context<#lifetime> {
-                    pub async: #name::Async,
-                    pub baseline: u32,
-                    pub input: #input,
+                pub struct #_context<#lifetime> {
+                    pub #time_field: u32,
+                    pub input: #input_,
                     pub resources: #name::Resources<#lifetime>,
-                    pub threshold: #hidden::#krate::Threshold<#name::Priority>,
+                    pub tasks: #name::Tasks,
+                    pub priority: ::#k::Priority<#name::Priority>,
                 }
 
                 #[allow(unsafe_code)]
-                impl<#lifetime> #__context<#lifetime> {
-                    pub unsafe fn new(bl: #hidden::#krate::Instant, payload: #input) -> Self {
-                        #__context {
-                            async: #name::Async::new(bl),
-                            baseline: bl.into(),
+                impl<#lifetime> #_context<#lifetime> {
+                    pub unsafe fn new(bl: ::#k::_impl::Instant, payload: #input_) -> Self {
+                        #_context {
+                            tasks: #name::Tasks::new(bl),
+                            #time_field: bl.into(),
                             input: payload,
                             resources: #name::Resources::new(),
-                            threshold: #hidden::#krate::Threshold::new(),
+                            priority: ::#k::Priority::_new(),
                         }
                     }
                 }
             });
         } else {
             root.push(quote! {
-                pub struct #__context<#lifetime> {
-                    pub async: #name::Async,
-                    pub input: #input,
+                pub struct #_context<#lifetime> {
+                    pub tasks: #name::Tasks,
+                    pub input: #input_,
                     pub resources: #name::Resources<#lifetime>,
-                    pub threshold: #hidden::#krate::Threshold<#name::Priority>,
+                    pub priority: ::#k::Priority<#name::Priority>,
                 }
 
                 #[allow(unsafe_code)]
-                impl<#lifetime> #__context<#lifetime> {
-                    pub unsafe fn new(payload: #input) -> Self {
-                        #__context {
-                            async: #name::Async::new(),
+                impl<#lifetime> #_context<#lifetime> {
+                    pub unsafe fn new(payload: #input_) -> Self {
+                        #_context {
+                            tasks: #name::Tasks::new(),
                             input: payload,
                             resources: #name::Resources::new(),
-                            threshold: #hidden::#krate::Threshold::new(),
+                            priority: ::#k::Priority::_new(),
                         }
                     }
                 }
@@ -161,61 +162,59 @@ pub fn app(ctxt: &Context, app: &App) -> Tokens {
                     let ty = &app.resources[res].ty;
                     quote!(pub #res: &'a mut #ty)
                 } else {
-                    quote!(pub #res: super::__resource::#res)
+                    quote!(pub #res: ::_resource::#res)
                 }
             })
             .collect::<Vec<_>>();
 
         let res_exprs = task.resources.iter().map(|res| {
             if ctxt.ceilings.resources()[res].is_owned() {
-                quote!(#res: super::__resource::#res::get())
+                quote!(#res: ::_resource::#res::_var())
             } else {
-                quote!(#res: super::__resource::#res::new())
+                quote!(#res: ::_resource::#res::new())
             }
         });
 
-        let async_fields = task.async
+        let tasks_fields = task.schedule_now
             .iter()
-            .map(|task| quote!(pub #task: ::__async::#task))
+            .map(|task| quote!(pub #task: ::_schedule_now::#task))
             .chain(
-                task.async_after
+                task.schedule_after
                     .iter()
-                    .map(|task| quote!(pub #task: ::__async_after::#task)),
+                    .map(|task| quote!(pub #task: ::_schedule_after::#task)),
             )
             .collect::<Vec<_>>();
 
-        let async_exprs = task.async
+        let tasks_exprs = task.schedule_now
             .iter()
             .map(|task| {
                 if cfg!(feature = "timer-queue") {
-                    quote!(#task: ::__async::#task::new(_bl))
+                    quote!(#task: ::_schedule_now::#task::new(_bl))
                 } else {
-                    quote!(#task: ::__async::#task::new())
+                    quote!(#task: ::_schedule_now::#task::new())
                 }
             })
             .chain(
-                task.async_after
+                task.schedule_after
                     .iter()
-                    .map(|task| quote!(#task: ::__async_after::#task::new(_bl))),
+                    .map(|task| quote!(#task: ::_schedule_after::#task::new(_bl))),
             )
             .collect::<Vec<_>>();
 
         let priority = Ident::from(format!("U{}", task.priority));
         mod_.push(quote! {
-            extern crate #krate;
-
             #[allow(unused_imports)]
-            use self::#krate::Resource;
+            use ::#k::Resource;
 
             pub const HANDLER: fn(Context) = ::#path;
 
             // The priority at this task is dispatched at
-            pub type Priority = #krate::#priority;
+            pub type Priority = ::#k::_impl::#priority;
 
-            pub use super::#__context as Context;
+            pub use ::#_context as Context;
 
-            pub struct Async {
-                #(#async_fields,)*
+            pub struct Tasks {
+                #(#tasks_fields,)*
             }
 
             #[allow(non_snake_case)]
@@ -236,39 +235,38 @@ pub fn app(ctxt: &Context, app: &App) -> Tokens {
         if cfg!(feature = "timer-queue") {
             mod_.push(quote! {
                 #[allow(unsafe_code)]
-                impl Async {
-                    pub unsafe fn new(_bl: #krate::Instant) -> Self {
-                        Async {
-                            #(#async_exprs,)*
+                impl Tasks {
+                    pub unsafe fn new(_bl: ::#k::_impl::Instant) -> Self {
+                        Tasks {
+                            #(#tasks_exprs,)*
                         }
                     }
                 }
-
             });
         } else {
             mod_.push(quote! {
                 #[allow(unsafe_code)]
-                impl Async {
+                impl Tasks {
                     pub unsafe fn new() -> Self {
-                        Async {
-                            #(#async_exprs,)*
+                        Tasks {
+                            #(#tasks_exprs,)*
                         }
                     }
                 }
-
             });
         }
 
-        match task.interrupt_or_capacity {
+        match task.interrupt_or_instances {
             Either::Left(interrupt) => {
                 let export_name = interrupt.as_ref();
-                let fn_name = Ident::from(format!("__{}", interrupt));
+                let fn_name = Ident::from(format!("_{}", interrupt));
 
                 let bl = if cfg!(feature = "timer-queue") {
-                    Some(quote!(#hidden::#krate::Instant::now(),))
+                    Some(quote!(_now,))
                 } else {
                     None
                 };
+
                 root.push(quote! {
                     #[allow(non_snake_case)]
                     #[allow(unsafe_code)]
@@ -276,27 +274,29 @@ pub fn app(ctxt: &Context, app: &App) -> Tokens {
                     pub unsafe extern "C" fn #fn_name() {
                         use #device::Interrupt;
                         let _ = Interrupt::#interrupt; // verify that the interrupt exists
+                        let _now = ::#k::_impl::Instant::now();
+                        core::sync::atomic::compiler_fence(core::sync::atomic::Ordering::SeqCst);
                         #name::HANDLER(#name::Context::new(#bl ()))
                     }
                 });
             }
-            Either::Right(capacity) => {
-                let ucapacity = Ident::from(format!("U{}", capacity));
-                let capacity = capacity as usize;
+            Either::Right(instances) => {
+                let ucapacity = Ident::from(format!("U{}", instances));
+                let capacity = instances as usize;
 
                 root.push(quote! {
                     #[allow(unsafe_code)]
-                    unsafe impl #hidden::#krate::Resource for #name::SQ {
+                    unsafe impl ::#k::Resource for #name::FREE_QUEUE {
                         const NVIC_PRIO_BITS: u8 = ::#device::NVIC_PRIO_BITS;
                         type Ceiling = #name::Ceiling;
-                        type Data = #hidden::#krate::SlotQueue<#hidden::#krate::#ucapacity>;
+                        type Data = ::#k::_impl::FreeQueue<::#k::_impl::#ucapacity>;
 
-                        unsafe fn get() -> &'static mut Self::Data {
-                            static mut SQ:
-                                #hidden::#krate::SlotQueue<#hidden::#krate::#ucapacity> =
-                                #hidden::#krate::SlotQueue::u8();
+                        unsafe fn _var() -> &'static mut Self::Data {
+                            static mut FREE_QUEUE:
+                                ::#k::_impl::FreeQueue<::#k::_impl::#ucapacity> =
+                                ::#k::_impl::FreeQueue::u8();
 
-                            &mut SQ
+                            &mut FREE_QUEUE
                         }
                     }
 
@@ -314,8 +314,8 @@ pub fn app(ctxt: &Context, app: &App) -> Tokens {
                 root.push(quote! {
                     #[allow(non_upper_case_globals)]
                     #[allow(unsafe_code)]
-                    pub static mut #mangled: [#input; #capacity] =
-                        unsafe { #hidden::#krate::uninitialized() };
+                    pub static mut #mangled: [#input_; #capacity] =
+                        unsafe { ::#k::_impl::uninitialized() };
 
                 });
 
@@ -324,22 +324,23 @@ pub fn app(ctxt: &Context, app: &App) -> Tokens {
 
                     #[allow(dead_code)]
                     #[allow(unsafe_code)]
-                    pub static mut BASELINES: [#krate::Instant; #capacity] = unsafe {
-                        #krate::uninitialized()
+                    pub static mut SCHEDULED_TIMES: [::#k::_impl::Instant; #capacity] = unsafe {
+                        ::#k::_impl::uninitialized()
                     };
 
-                    pub struct SQ { _0: () }
+                    #[allow(non_camel_case_types)]
+                    pub struct FREE_QUEUE { _0: () }
 
                     #[allow(dead_code)]
                     #[allow(unsafe_code)]
-                    impl SQ {
+                    impl FREE_QUEUE {
                         pub unsafe fn new() -> Self {
-                            SQ { _0: () }
+                            FREE_QUEUE { _0: () }
                         }
                     }
 
-                    // Ceiling of the `SQ` resource
-                    pub type Ceiling = #krate::#ceiling;
+                    // Ceiling of the `FREE_QUEUE` resource
+                    pub type Ceiling = ::#k::_impl::#ceiling;
                 });
             }
         }
@@ -351,15 +352,24 @@ pub fn app(ctxt: &Context, app: &App) -> Tokens {
         });
     }
 
-    /* Async */
-    let async = ctxt.async
+    /* schedule_now */
+    let schedule_now = ctxt.schedule_now
         .iter()
         .map(|name| {
             let task = &app.tasks[name];
             let priority = task.priority;
-            let __priority = Ident::from(format!("__{}", priority));
+            let _priority = Ident::from(format!("_{}", priority));
             let interrupt = ctxt.dispatchers[&priority].interrupt();
-            let ty = &task.input;
+
+            let input_ = task.input
+                .as_ref()
+                .map(|input| quote!(#input))
+                .unwrap_or(quote!(()));
+            let (payload_in, payload_out) = if let Some(input) = task.input.as_ref() {
+                (quote!(payload: #input,), quote!(payload))
+            } else {
+                (quote!(), quote!(()))
+            };
 
             let sqc = Ident::from(format!(
                 "U{}",
@@ -372,45 +382,44 @@ pub fn app(ctxt: &Context, app: &App) -> Tokens {
                 root.push(quote! {
                     #[allow(dead_code)]
                     #[allow(unsafe_code)]
-                    impl __async::#name {
+                    impl _schedule_now::#name {
                         #[inline]
-                        pub fn post<P>(
+                        pub fn schedule_now<P>(
                             &mut self,
-                            t: &mut #hidden::#krate::Threshold<P>,
-                            payload: #ty,
-                        ) -> Result<(), #ty>
+                            t: &mut ::#k::Priority<P>,
+                            #payload_in
+                        ) -> Result<(), #input_>
                         where
-                            P: #hidden::#krate::Unsigned +
-                                #hidden::#krate::Max<#hidden::#krate::#sqc> +
-                                #hidden::#krate::Max<#hidden::#krate::#qc>,
-                            #hidden::#krate::Maximum<P, #hidden::#krate::#sqc>: #hidden::#krate::Unsigned,
-                            #hidden::#krate::Maximum<P, #hidden::#krate::#qc>: #hidden::#krate::Unsigned,
+                            P: ::#k::_impl::Unsigned +
+                                ::#k::_impl::Max<::#k::_impl::#sqc> +
+                                ::#k::_impl::Max<::#k::_impl::#qc>,
+                            ::#k::_impl::Maximum<P, ::#k::_impl::#sqc>: ::#k::_impl::Unsigned,
+                            ::#k::_impl::Maximum<P, ::#k::_impl::#qc>: ::#k::_impl::Unsigned,
                         {
                             unsafe {
-                                use #hidden::#krate::Resource;
+                                use ::#k::Resource;
 
-                                let slot = ::#name::SQ::new().claim_mut(t, |sq, _| sq.dequeue());
+                                let slot = ::#name::FREE_QUEUE::new()
+                                    .claim_mut(t, |sq, _| sq.dequeue());
                                 if let Some(index) = slot {
-                                    let task = ::#__priority::Task::#name;
+                                    let task = ::#_priority::Task::#name;
                                     core::ptr::write(
                                         #name::PAYLOADS.get_unchecked_mut(index as usize),
-                                        payload,
-                                    );
-                                    core::ptr::write(
-                                        #name::BASELINES.get_unchecked_mut(index as usize),
-                                        self.baseline(),
+                                        #payload_out,
                                     );
+                                    *#name::SCHEDULED_TIMES.get_unchecked_mut(index as usize) =
+                                        self.scheduled_time();
 
-                                    #__priority::Q::new().claim_mut(t, |q, _| {
+                                    #_priority::READY_QUEUE::new().claim_mut(t, |q, _| {
                                         q.split().0.enqueue_unchecked((task, index));
                                     });
 
                                     use #device::Interrupt;
-                                    #hidden::#krate::set_pending(Interrupt::#interrupt);
+                                    ::#k::_impl::trigger(Interrupt::#interrupt);
 
                                     Ok(())
                                 } else {
-                                    Err(payload)
+                                    Err(#payload_out)
                                 }
                             }
                         }
@@ -419,17 +428,17 @@ pub fn app(ctxt: &Context, app: &App) -> Tokens {
 
                 quote! {
                     #[allow(non_camel_case_types)]
-                    pub struct #name { baseline: #krate::Instant }
+                    pub struct #name { scheduled_time: ::#k::_impl::Instant }
 
                     #[allow(dead_code)]
                     #[allow(unsafe_code)]
                     impl #name {
-                        pub unsafe fn new(bl: #krate::Instant) -> Self {
-                            #name { baseline: bl }
+                        pub unsafe fn new(bl: ::#k::_impl::Instant) -> Self {
+                            #name { scheduled_time: bl }
                         }
 
-                        pub fn baseline(&self) -> #krate::Instant {
-                            self.baseline
+                        pub fn scheduled_time(&self) -> ::#k::_impl::Instant {
+                            self.scheduled_time
                         }
                     }
                 }
@@ -437,41 +446,41 @@ pub fn app(ctxt: &Context, app: &App) -> Tokens {
                 root.push(quote! {
                     #[allow(dead_code)]
                     #[allow(unsafe_code)]
-                    impl __async::#name {
+                    impl _schedule_now::#name {
                         #[inline]
-                        pub fn post<P>(
+                        pub fn schedule_now<P>(
                             &mut self,
-                            t: &mut #hidden::#krate::Threshold<P>,
-                            payload: #ty,
-                        ) -> Result<(), #ty>
+                            t: &mut ::#k::Priority<P>,
+                            #payload_in
+                        ) -> Result<(), #input_>
                         where
-                            P: #hidden::#krate::Unsigned +
-                                #hidden::#krate::Max<#hidden::#krate::#sqc> +
-                                #hidden::#krate::Max<#hidden::#krate::#qc>,
-                            #hidden::#krate::Maximum<P, #hidden::#krate::#sqc>: #hidden::#krate::Unsigned,
-                            #hidden::#krate::Maximum<P, #hidden::#krate::#qc>: #hidden::#krate::Unsigned,
+                            P: ::#k::_impl::Unsigned +
+                                ::#k::_impl::Max<::#k::_impl::#sqc> +
+                                ::#k::_impl::Max<::#k::_impl::#qc>,
+                            ::#k::_impl::Maximum<P, ::#k::_impl::#sqc>: ::#k::_impl::Unsigned,
+                            ::#k::_impl::Maximum<P, ::#k::_impl::#qc>: ::#k::_impl::Unsigned,
                         {
                             unsafe {
-                                use #hidden::#krate::Resource;
+                                use ::#k::Resource;
 
                                 if let Some(index) =
-                                    ::#name::SQ::new().claim_mut(t, |sq, _| sq.dequeue()) {
-                                    let task = ::#__priority::Task::#name;
+                                    ::#name::FREE_QUEUE::new().claim_mut(t, |sq, _| sq.dequeue()) {
+                                    let task = ::#_priority::Task::#name;
                                     core::ptr::write(
                                         ::#name::PAYLOADS.get_unchecked_mut(index as usize),
-                                        payload,
+                                        #payload_out,
                                     );
 
-                                    ::#__priority::Q::new().claim_mut(t, |q, _| {
+                                    ::#_priority::READY_QUEUE::new().claim_mut(t, |q, _| {
                                         q.split().0.enqueue_unchecked((task, index));
                                     });
 
                                     use #device::Interrupt;
-                                    #hidden::#krate::set_pending(Interrupt::#interrupt);
+                                    ::#k::_impl::trigger(Interrupt::#interrupt);
 
                                     Ok(())
                                 } else {
-                                    Err(payload)
+                                    Err(#payload_out)
                                 }
                             }
                         }
@@ -494,22 +503,19 @@ pub fn app(ctxt: &Context, app: &App) -> Tokens {
         })
         .collect::<Vec<_>>();
     root.push(quote! {
-        mod __async {
-            extern crate #krate;
-
+        mod _schedule_now {
             #[allow(unused_imports)]
-            use self::#krate::Resource;
+            use ::#k::Resource;
 
-            #(#async)*
+            #(#schedule_now)*
         }
     });
 
-    /* Async (+after) */
-    let async_after = ctxt.async_after
+    /* schedule_after */
+    let schedule_after = ctxt.schedule_after
         .iter()
         .map(|name| {
             let task = &app.tasks[name];
-            let ty = &task.input;
 
             let sqc = Ident::from(format!(
                 "U{}",
@@ -517,51 +523,61 @@ pub fn app(ctxt: &Context, app: &App) -> Tokens {
             ));
             let tqc = Ident::from(format!("U{}", ctxt.ceilings.timer_queue()));
 
+            let input_ = task.input
+                .as_ref()
+                .map(|input| quote!(#input))
+                .unwrap_or(quote!(()));
+            let (payload_in, payload_out) = if let Some(input) = task.input.as_ref() {
+                (quote!(payload: #input,), quote!(payload))
+            } else {
+                (quote!(), quote!(()))
+            };
+
             // NOTE needs to be in the root because of `#ty`
             root.push(quote! {
                 #[allow(dead_code)]
                 #[allow(unsafe_code)]
-                impl __async_after::#name {
+                impl _schedule_after::#name {
                     #[inline]
-                    pub fn post<P>(
+                    pub fn schedule_after<P>(
                         &self,
-                        t: &mut #hidden::#krate::Threshold<P>,
+                        t: &mut ::#k::Priority<P>,
                         after: u32,
-                        payload: #ty,
-                    ) -> Result<(), #ty>
+                        #payload_in
+                    ) -> Result<(), #input_>
                     where
-                        P: #hidden::#krate::Unsigned +
-                            #hidden::#krate::Max<#hidden::#krate::#sqc> +
-                            #hidden::#krate::Max<#hidden::#krate::#tqc>,
-                        #hidden::#krate::Maximum<P, #hidden::#krate::#sqc>: #hidden::#krate::Unsigned,
-                        #hidden::#krate::Maximum<P, #hidden::#krate::#tqc>: #hidden::#krate::Unsigned,
+                        P: ::#k::_impl::Unsigned +
+                            ::#k::_impl::Max<::#k::_impl::#sqc> +
+                            ::#k::_impl::Max<::#k::_impl::#tqc>,
+                        ::#k::_impl::Maximum<P, ::#k::_impl::#sqc>: ::#k::_impl::Unsigned,
+                        ::#k::_impl::Maximum<P, ::#k::_impl::#tqc>: ::#k::_impl::Unsigned,
                     {
                         unsafe {
-                            use #hidden::#krate::Resource;
+                            use ::#k::Resource;
 
                             if let Some(index) =
-                                ::#name::SQ::new().claim_mut(t, |sq, _| sq.dequeue()) {
-                                let bl = self.baseline() + after;
-                                let task = ::__tq::Task::#name;
+                                ::#name::FREE_QUEUE::new().claim_mut(t, |sq, _| sq.dequeue()) {
+                                let ss = self.scheduled_time() + after;
+                                let task = ::_tq::Task::#name;
+
                                 core::ptr::write(
                                     ::#name::PAYLOADS.get_unchecked_mut(index as usize),
-                                    payload,
+                                    #payload_out,
                                 );
-                                core::ptr::write(
-                                    ::#name::BASELINES.get_unchecked_mut(index as usize),
-                                    bl,
-                                );
-                                let m = #hidden::#krate::Message {
-                                    baseline: bl,
+
+                                *::#name::SCHEDULED_TIMES.get_unchecked_mut(index as usize) = ss;
+
+                                let m = ::#k::_impl::NotReady {
+                                    scheduled_time: ss,
                                     index,
                                     task,
                                 };
 
-                                ::__tq::TQ::new().claim_mut(t, |tq, _| tq.enqueue(m));
+                                ::_tq::TIMER_QUEUE::new().claim_mut(t, |tq, _| tq.enqueue(m));
 
                                 Ok(())
                             } else {
-                                Err(payload)
+                                Err(#payload_out)
                             }
                         }
                     }
@@ -570,30 +586,28 @@ pub fn app(ctxt: &Context, app: &App) -> Tokens {
 
             quote! {
                 #[allow(non_camel_case_types)]
-                pub struct #name { baseline: #krate::Instant }
+                pub struct #name { scheduled_time: ::#k::_impl::Instant }
 
                 #[allow(dead_code)]
                 #[allow(unsafe_code)]
                 impl #name {
-                    pub unsafe fn new(bl: #krate::Instant) -> Self {
-                        #name { baseline: bl }
+                    pub unsafe fn new(ss: ::#k::_impl::Instant) -> Self {
+                        #name { scheduled_time: ss }
                     }
 
-                    pub fn baseline(&self) -> #krate::Instant {
-                        self.baseline
+                    pub fn scheduled_time(&self) -> ::#k::_impl::Instant {
+                        self.scheduled_time
                     }
                 }
             }
         })
         .collect::<Vec<_>>();
     root.push(quote! {
-        mod __async_after {
-            extern crate #krate;
-
+        mod _schedule_after {
             #[allow(unused_imports)]
-            use self::#krate::Resource;
+            use ::#k::Resource;
 
-            #(#async_after)*
+            #(#schedule_after)*
         }
     });
 
@@ -605,16 +619,16 @@ pub fn app(ctxt: &Context, app: &App) -> Tokens {
             .tasks()
             .iter()
             .map(|(name, priority)| {
-                let __priority = Ident::from(format!("__{}", priority));
+                let _priority = Ident::from(format!("_{}", priority));
                 let interrupt = ctxt.dispatchers[priority].interrupt();
 
                 quote! {
-                    __tq::Task::#name => {
-                        #__priority::Q::new().claim_mut(t, |q, _| {
-                            q.split().0.enqueue_unchecked((#__priority::Task::#name, index))
+                    _tq::Task::#name => {
+                        #_priority::READY_QUEUE::new().claim_mut(t, |q, _| {
+                            q.split().0.enqueue_unchecked((#_priority::Task::#name, index))
                         });
                         use #device::Interrupt;
-                        #hidden::#krate::set_pending(Interrupt::#interrupt);
+                        ::#k::_impl::trigger(Interrupt::#interrupt);
                     }
                 }
             })
@@ -623,34 +637,33 @@ pub fn app(ctxt: &Context, app: &App) -> Tokens {
         let ceiling = Ident::from(format!("U{}", ctxt.ceilings.timer_queue()));
         let priority = Ident::from(format!("U{}", ctxt.sys_tick));
         root.push(quote! {
-            mod __tq {
-                extern crate #krate;
-
-                pub struct TQ { _0: () }
+            mod _tq {
+                #[allow(non_camel_case_types)]
+                pub struct TIMER_QUEUE { _0: () }
 
                 #[allow(unsafe_code)]
-                impl TQ {
+                impl TIMER_QUEUE {
                     pub unsafe fn new() -> Self {
-                        TQ { _0: () }
+                        TIMER_QUEUE { _0: () }
                     }
                 }
 
                 #[allow(unsafe_code)]
-                unsafe impl #krate::Resource for TQ {
+                unsafe impl ::#k::Resource for TIMER_QUEUE {
                     const NVIC_PRIO_BITS: u8 = ::#device::NVIC_PRIO_BITS;
-                    type Ceiling = #krate::#ceiling;
-                    type Data = #krate::TimerQueue<Task, #krate::#capacity>;
+                    type Ceiling = ::#k::_impl::#ceiling;
+                    type Data = ::#k::_impl::TimerQueue<Task, ::#k::_impl::#capacity>;
 
-                    unsafe fn get() -> &'static mut Self::Data {
-                        static mut TQ: #krate::TimerQueue<Task, #krate::#capacity> =
-                            unsafe { #krate::uninitialized() };
+                    unsafe fn _var() -> &'static mut Self::Data {
+                        static mut TIMER_QUEUE: ::#k::_impl::TimerQueue<Task, ::#k::_impl::#capacity> =
+                            unsafe { ::#k::_impl::uninitialized() };
 
-                        &mut TQ
+                        &mut TIMER_QUEUE
                     }
                 }
 
                 // SysTick priority
-                pub type Priority = #krate::#priority;
+                pub type Priority = ::#k::_impl::#priority;
 
                 #[allow(non_camel_case_types)]
                 #[allow(dead_code)]
@@ -660,13 +673,13 @@ pub fn app(ctxt: &Context, app: &App) -> Tokens {
 
             #[allow(non_snake_case)]
             #[allow(unsafe_code)]
-            #[export_name = "SYS_TICK"]
-            pub unsafe extern "C" fn __SYS_TICK() {
-                use #hidden::#krate::Resource;
+            #[export_name = "SysTick"]
+            pub unsafe extern "C" fn _impl_SysTick() {
+                use ::#k::Resource;
 
-                #hidden::#krate::dispatch(
-                    &mut #hidden::#krate::Threshold::<__tq::Priority>::new(),
-                    &mut __tq::TQ::new(),
+                ::#k::_impl::dispatch(
+                    &mut ::#k::Priority::<_tq::Priority>::_new(),
+                    &mut _tq::TIMER_QUEUE::new(),
                     |t, task, index| {
                         match task {
                             #(#arms,)*
@@ -678,36 +691,36 @@ pub fn app(ctxt: &Context, app: &App) -> Tokens {
 
     /* Dispatchers */
     for (priority, dispatcher) in &ctxt.dispatchers {
-        let __priority = Ident::from(format!("__{}", priority));
+        let _priority = Ident::from(format!("_{}", priority));
         let capacity = Ident::from(format!("U{}", dispatcher.capacity()));
         let tasks = dispatcher.tasks();
         let ceiling = Ident::from(format!("U{}", ctxt.ceilings.dispatch_queues()[priority]));
 
         root.push(quote! {
-            mod #__priority {
-                extern crate #krate;
-
-                pub struct Q { _0: () }
+            mod #_priority {
+                #[allow(non_camel_case_types)]
+                pub struct READY_QUEUE { _0: () }
 
                 #[allow(unsafe_code)]
                 #[allow(dead_code)]
-                impl Q {
+                impl READY_QUEUE {
                     pub unsafe fn new() -> Self {
-                        Q { _0: () }
+                        READY_QUEUE { _0: () }
                     }
                 }
 
                 #[allow(unsafe_code)]
-                unsafe impl #krate::Resource for Q {
+                unsafe impl ::#k::Resource for READY_QUEUE {
                     const NVIC_PRIO_BITS: u8 = ::#device::NVIC_PRIO_BITS;
-                    type Ceiling = #krate::#ceiling;
-                    type Data = #krate::PayloadQueue<Task, #krate::#capacity>;
+                    type Ceiling = ::#k::_impl::#ceiling;
+                    type Data = ::#k::_impl::ReadyQueue<Task, ::#k::_impl::#capacity>;
 
-                    unsafe fn get() -> &'static mut Self::Data {
-                        static mut Q: #krate::PayloadQueue<Task, #krate::#capacity> =
-                            #krate::PayloadQueue::u8();
+                    unsafe fn _var() -> &'static mut Self::Data {
+                        static mut READY_QUEUE:
+                            ::#k::_impl::ReadyQueue<Task, ::#k::_impl::#capacity> =
+                            ::#k::_impl::ReadyQueue::u8();
 
-                        &mut Q
+                        &mut READY_QUEUE
                     }
                 }
 
@@ -722,23 +735,27 @@ pub fn app(ctxt: &Context, app: &App) -> Tokens {
             .tasks()
             .iter()
             .map(|name| {
-                // NOTE(get) this is the only `Slot` producer because a task can only be
+                // NOTE(_var) this is the only free slot producer because a task can only be
                 // dispatched at one priority
                 if cfg!(feature = "timer-queue") {
                     quote! {
-                    #__priority::Task::#name => {
-                        let payload = core::ptr::read(::#name::PAYLOADS.get_unchecked(index as usize));
-                        let baseline = core::ptr::read(::#name::BASELINES.get_unchecked(index as usize));
-                        #name::SQ::get().split().0.enqueue_unchecked(index);
-                        #name::HANDLER(#name::Context::new(baseline, payload));
+                    #_priority::Task::#name => {
+                        let payload =
+                            core::ptr::read(::#name::PAYLOADS.get_unchecked(index as usize));
+                        let ss = *::#name::SCHEDULED_TIMES.get_unchecked(index as usize);
+
+                        #name::FREE_QUEUE::_var().split().0.enqueue_unchecked(index);
+
+                        #name::HANDLER(#name::Context::new(ss, payload));
                     }
 
                     }
                 } else {
                     quote! {
-                    #__priority::Task::#name => {
-                        let payload = core::ptr::read(::#name::PAYLOADS.get_unchecked(index as usize));
-                        #name::SQ::get().split().0.enqueue_unchecked(index);
+                    #_priority::Task::#name => {
+                        let payload =
+                            core::ptr::read(::#name::PAYLOADS.get_unchecked(index as usize));
+                        #name::FREE_QUEUE::_var().split().0.enqueue_unchecked(index);
                         #name::HANDLER(#name::Context::new(payload));
                     }
                     }
@@ -748,16 +765,17 @@ pub fn app(ctxt: &Context, app: &App) -> Tokens {
 
         let interrupt = dispatcher.interrupt();
         let export_name = interrupt.as_ref();
-        let fn_name = Ident::from(format!("__{}", export_name));
+        let fn_name = Ident::from(format!("_{}", export_name));
         root.push(quote! {
             #[allow(non_snake_case)]
             #[allow(unsafe_code)]
             #[export_name = #export_name]
             pub unsafe extern "C" fn #fn_name() {
-                use #hidden::#krate::Resource;
+                use ::#k::Resource;
 
-                // NOTE(get) the dispatcher is the only consumer of this queue
-                while let Some((task, index)) = #__priority::Q::get().split().1.dequeue() {
+                // NOTE(_var) the dispatcher is the only consumer of this queue
+                while let Some((task, index)) =
+                    #_priority::READY_QUEUE::_var().split().1.dequeue() {
                     match task {
                         #(#arms,)*
                     }
@@ -772,33 +790,62 @@ pub fn app(ctxt: &Context, app: &App) -> Tokens {
     for (name, task) in &app.tasks {
         let input = &task.input;
 
-        if let Either::Right(capacity) = task.interrupt_or_capacity {
+        if let Either::Right(instances) = task.interrupt_or_instances {
             pre_init.push(quote! {
-                for i in 0..#capacity {
-                    #name::SQ::get().enqueue_unchecked(i);
+                for i in 0..#instances {
+                    #name::FREE_QUEUE::_var().enqueue_unchecked(i);
                 }
             })
         }
     }
 
     let prio_bits = quote!(#device::NVIC_PRIO_BITS);
-    if needs_tq {
-        let priority = ctxt.sys_tick;
 
+    if needs_tq {
         pre_init.push(quote! {
             // Configure the system timer
-            _syst.set_clock_source(#hidden::#krate::SystClkSource::Core);
-            _syst.enable_counter();
-
-            // Set the priority of the SysTick exception
-            let priority = ((1 << #prio_bits) - #priority) << (8 - #prio_bits);
-            core.SCB.shpr[11].write(priority);
+            p.SYST.set_clock_source(::#k::_impl::SystClkSource::Core);
+            p.SYST.enable_counter();
 
             // Initialize the timer queue
-            core::ptr::write(__tq::TQ::get(), #hidden::#krate::TimerQueue::new(_syst));
+            core::ptr::write(_tq::TIMER_QUEUE::_var(), ::#k::_impl::TimerQueue::new(p.SYST));
         });
     }
 
+    let core = if cfg!(feature = "timer-queue") {
+        quote! {
+            ::#k::_impl::Peripherals {
+                CBP: p.CBP,
+                CPUID: p.CPUID,
+                DCB: p.DCB,
+                // DWT: p.DWT,
+                FPB: p.FPB,
+                FPU: p.FPU,
+                ITM: p.ITM,
+                MPU: p.MPU,
+                SCB: &mut p.SCB,
+                // SYST: p.SYST,
+                TPIU: p.TPIU,
+            }
+        }
+    } else {
+        quote! {
+            ::#k::_impl::Peripherals {
+                CBP: p.CBP,
+                CPUID: p.CPUID,
+                DCB: p.DCB,
+                DWT: p.DWT,
+                FPB: p.FPB,
+                FPU: p.FPU,
+                ITM: p.ITM,
+                MPU: p.MPU,
+                SCB: p.SCB,
+                SYST: p.SYST,
+                TPIU: p.TPIU,
+            }
+        }
+    };
+
     /* init */
     let res_fields = app.init
         .resources
@@ -812,36 +859,36 @@ pub fn app(ctxt: &Context, app: &App) -> Tokens {
     let res_exprs = app.init
         .resources
         .iter()
-        .map(|r| quote!(#r: __resource::#r::get()))
+        .map(|r| quote!(#r: _resource::#r::_var()))
         .collect::<Vec<_>>();
 
-    let async_fields = app.init
-        .async
+    let tasks_fields = app.init
+        .schedule_now
         .iter()
-        .map(|task| quote!(pub #task: ::__async::#task))
+        .map(|task| quote!(pub #task: ::_schedule_now::#task))
         .chain(
             app.init
-                .async_after
+                .schedule_after
                 .iter()
-                .map(|task| quote!(pub #task: ::__async_after::#task)),
+                .map(|task| quote!(pub #task: ::_schedule_after::#task)),
         )
         .collect::<Vec<_>>();
 
-    let async_exprs = app.init
-        .async
+    let tasks_exprs = app.init
+        .schedule_now
         .iter()
         .map(|task| {
             if cfg!(feature = "timer-queue") {
-                quote!(#task: ::__async::#task::new(_bl))
+                quote!(#task: ::_schedule_now::#task::new(_bl))
             } else {
-                quote!(#task: ::__async::#task::new())
+                quote!(#task: ::_schedule_now::#task::new())
             }
         })
         .chain(
             app.init
-                .async_after
+                .schedule_after
                 .iter()
-                .map(|task| quote!(#task: ::__async_after::#task::new(_bl))),
+                .map(|task| quote!(#task: ::_schedule_after::#task::new(_bl))),
         )
         .collect::<Vec<_>>();
 
@@ -857,20 +904,13 @@ pub fn app(ctxt: &Context, app: &App) -> Tokens {
         })
         .collect::<Vec<_>>();
 
-    let bl = if cfg!(feature = "timer-queue") {
-        Some(quote!(let _bl = #krate::Instant::new(0);))
+    let (bl, lt) = if cfg!(feature = "timer-queue") {
+        (
+            Some(quote!(let _bl = ::#k::_impl::Instant(0);)),
+            Some(quote!('a)),
+        )
     } else {
-        None
-    };
-    let baseline_field = if cfg!(feature = "timer-queue") {
-        Some(quote!(pub baseline: u32,))
-    } else {
-        None
-    };
-    let baseline_expr = if cfg!(feature = "timer-queue") {
-        Some(quote!(baseline: 0,))
-    } else {
-        None
+        (None, None)
     };
     root.push(quote! {
         #[allow(non_snake_case)]
@@ -879,49 +919,45 @@ pub fn app(ctxt: &Context, app: &App) -> Tokens {
         }
 
         mod init {
-            extern crate #krate;
-
             #[allow(unused_imports)]
-            use self::#krate::Resource;
+            use ::#k::Resource;
 
             pub use ::#device::Peripherals as Device;
             pub use ::_ZN4init13LateResourcesE as LateResources;
 
             #[allow(dead_code)]
-            pub struct Context {
-                pub async: Async,
-                #baseline_field
-                pub core: #krate::Core,
+            pub struct Context<#lt> {
+                pub core: ::#k::_impl::Peripherals<#lt>,
                 pub device: Device,
                 pub resources: Resources,
-                pub threshold: #krate::Threshold<#krate::U255>,
+                pub tasks: Tasks,
+                pub priority: ::#k::Priority<::#k::_impl::U255>,
             }
 
             #[allow(unsafe_code)]
-            impl Context {
-                pub unsafe fn new(core: #krate::Core) -> Self {
+            impl<#lt> Context<#lt> {
+                pub unsafe fn new(core: ::#k::_impl::Peripherals<#lt>) -> Self {
                     Context {
-                        async: Async::new(),
-                        #baseline_expr
+                        tasks: Tasks::new(),
                         core,
                         device: Device::steal(),
                         resources: Resources::new(),
-                        threshold: #krate::Threshold::new(),
+                        priority: ::#k::Priority::_new(),
                     }
                 }
             }
 
-            pub struct Async {
-                #(#async_fields,)*
+            pub struct Tasks {
+                #(#tasks_fields,)*
             }
 
             #[allow(unsafe_code)]
-            impl Async {
+            impl Tasks {
                 unsafe fn new() -> Self {
                     #bl
 
-                    Async {
-                        #(#async_exprs,)*
+                    Tasks {
+                        #(#tasks_exprs,)*
                     }
                 }
             }
@@ -945,11 +981,21 @@ pub fn app(ctxt: &Context, app: &App) -> Tokens {
     /* post-init */
     let mut post_init = vec![];
 
+    if needs_tq {
+        let priority = ctxt.sys_tick;
+
+        post_init.push(quote! {
+            // Set the priority of the SysTick exception
+            let priority = ((1 << #prio_bits) - #priority) << (8 - #prio_bits);
+            p.SCB.shpr[11].write(priority);
+        });
+    }
+
     // Initialize LateResources
     for (name, res) in &app.resources {
         if res.expr.is_none() {
             post_init.push(quote! {
-                core::ptr::write(__resource::#name::get(), _lr.#name);
+                core::ptr::write(_resource::#name::_var(), _lr.#name);
             });
         }
     }
@@ -959,7 +1005,7 @@ pub fn app(ctxt: &Context, app: &App) -> Tokens {
         let interrupt = dispatcher.interrupt();
         post_init.push(quote! {
             let priority = ((1 << #prio_bits) - #priority) << (8 - #prio_bits);
-            _nvic.set_priority(Interrupt::#interrupt, priority);
+            p.NVIC.set_priority(Interrupt::#interrupt, priority);
         });
     }
 
@@ -967,7 +1013,7 @@ pub fn app(ctxt: &Context, app: &App) -> Tokens {
     for (interrupt, (_, priority)) in &ctxt.triggers {
         post_init.push(quote! {
             let priority = ((1 << #prio_bits) - #priority) << (8 - #prio_bits);
-            _nvic.set_priority(Interrupt::#interrupt, priority);
+            p.NVIC.set_priority(Interrupt::#interrupt, priority);
         });
     }
 
@@ -975,14 +1021,22 @@ pub fn app(ctxt: &Context, app: &App) -> Tokens {
     for dispatcher in ctxt.dispatchers.values() {
         let interrupt = dispatcher.interrupt();
         post_init.push(quote! {
-            _nvic.enable(Interrupt::#interrupt);
+            p.NVIC.enable(Interrupt::#interrupt);
         });
     }
 
     // Enable triggers
     for interrupt in ctxt.triggers.keys() {
         post_init.push(quote! {
-            _nvic.enable(Interrupt::#interrupt);
+            p.NVIC.enable(Interrupt::#interrupt);
+        });
+    }
+
+    if needs_tq {
+        post_init.push(quote! {
+            // Set the system time to zero
+            p.DWT.enable_cycle_counter();
+            p.DWT.cyccnt.write(0);
         });
     }
 
@@ -996,7 +1050,7 @@ pub fn app(ctxt: &Context, app: &App) -> Tokens {
 
                 quote!(pub #res: &'static mut #ty)
             } else {
-                quote!(pub #res: __resource::#res)
+                quote!(pub #res: _resource::#res)
             }
         })
         .collect::<Vec<_>>();
@@ -1006,24 +1060,22 @@ pub fn app(ctxt: &Context, app: &App) -> Tokens {
         .iter()
         .map(|res| {
             if ctxt.ceilings.resources()[res].is_owned() {
-                quote!(#res: __resource::#res::get())
+                quote!(#res: _resource::#res::_var())
             } else {
-                quote!(#res: __resource::#res::new())
+                quote!(#res: _resource::#res::new())
             }
         })
         .collect::<Vec<_>>();
 
     root.push(quote! {
         mod idle {
-            extern crate #krate;
-
             #[allow(unused_imports)]
-            use self::#krate::Resource;
+            use ::#k::Resource;
 
             #[allow(dead_code)]
             pub struct Context {
                 pub resources: Resources,
-                pub threshold: #krate::Threshold<#krate::U0>,
+                pub priority: ::#k::Priority<::#k::_impl::U0>,
             }
 
             #[allow(unsafe_code)]
@@ -1031,7 +1083,7 @@ pub fn app(ctxt: &Context, app: &App) -> Tokens {
                 pub unsafe fn new() -> Self {
                     Context {
                         resources: Resources::new(),
-                        threshold: #krate::Threshold::new(),
+                        priority: ::#k::Priority::_new(),
                     }
                 }
             }
@@ -1057,36 +1109,31 @@ pub fn app(ctxt: &Context, app: &App) -> Tokens {
     let init = &app.init.path;
     root.push(quote! {
         #[allow(unsafe_code)]
+        #[allow(unused_mut)]
         #[deny(const_err)]
-        fn main() {
+        #[no_mangle]
+        pub unsafe extern "C" fn main() -> ! {
             #[allow(unused_imports)]
-            use #hidden::#krate::Resource;
+            use ::#k::Resource;
             #[allow(unused_imports)]
             use #device::Interrupt;
 
-            #[allow(unused_mut)]
-            unsafe {
-                let init: fn(init::Context) -> init::LateResources = #init;
-                let idle: fn(idle::Context) -> ! = #idle;
+            let init: fn(init::Context) -> init::LateResources = #init;
+            let idle: fn(idle::Context) -> ! = #idle;
 
-                #hidden::#krate::interrupt::disable();
+            ::#k::_impl::interrupt::disable();
 
-                let (mut core, mut dwt, mut _nvic, mut _syst) = #hidden::#krate::Core::steal();
+            let mut p = ::#k::_impl::steal();
 
-                #(#pre_init)*
+            #(#pre_init)*
 
-                let _lr = init(init::Context::new(core));
+            let _lr = init(init::Context::new(#core));
 
-                #(#post_init)*
+            #(#post_init)*
 
-                // Set the system baseline to zero
-                dwt.enable_cycle_counter();
-                dwt.cyccnt.write(0);
+            ::#k::_impl::interrupt::enable();
 
-                #hidden::#krate::interrupt::enable();
-
-                idle(idle::Context::new())
-            }
+            idle(idle::Context::new())
         }
     });
 
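For illustration (not part of the patch): the generated code above renames the user-facing API, so `init::Context` now carries `tasks` (instead of `async`) and `priority` (instead of `threshold`), and each task proxy exposes `schedule_now`/`schedule_after`. A minimal usage sketch, assuming a hypothetical application with a task `a: u32` routed through a dispatcher and no late resources:

    fn init(mut ctxt: init::Context) -> init::LateResources {
        let t = &mut ctxt.priority;

        // Err(payload) is returned when FREE_QUEUE has no free slot left
        ctxt.tasks.a.schedule_now(t, 42).ok();

        init::LateResources {}
    }
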
diff --git a/src/instant.rs b/src/_impl/instant.rs
similarity index 77%
rename from src/instant.rs
rename to src/_impl/instant.rs
index d9a96104b8..4df911f425 100644
--- a/src/instant.rs
+++ b/src/_impl/instant.rs
@@ -1,11 +1,10 @@
 use core::cmp::Ordering;
-use core::ops;
+use core::{ops, ptr};
 
 use cortex_m::peripheral::DWT;
 
-#[doc(hidden)]
 #[derive(Clone, Copy, Debug)]
-pub struct Instant(u32);
+pub struct Instant(pub u32);
 
 impl Into<u32> for Instant {
     fn into(self) -> u32 {
@@ -14,12 +13,11 @@ impl Into<u32> for Instant {
 }
 
 impl Instant {
-    pub unsafe fn new(timestamp: u32) -> Self {
-        Instant(timestamp)
-    }
-
     pub fn now() -> Self {
-        Instant(DWT::get_cycle_count())
+        const DWT_CYCCNT: *const u32 = 0xE000_1004 as *const u32;
+
+        // NOTE(ptr::read) deliberately a non-volatile load so the compiler is free to optimize it away
+        Instant(unsafe { ptr::read(DWT_CYCCNT) })
     }
 }
 
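For illustration (not part of the patch): the timer queue below compares `scheduled_time - Instant::now()` against zero, so `Instant` subtraction has to stay meaningful across CYCCNT wrap-around. That impl is not shown in this hunk; a plausible sketch of it, under the assumption that it is plain wrapping arithmetic on the `u32` timestamp (`ops` is already imported at the top of this file):

    impl ops::Sub for Instant {
        type Output = i32;

        fn sub(self, rhs: Instant) -> i32 {
            // wrapping subtraction keeps the sign of the difference correct
            // even after the 32-bit cycle counter overflows
            self.0.wrapping_sub(rhs.0) as i32
        }
    }
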
diff --git a/src/_impl/mod.rs b/src/_impl/mod.rs
new file mode 100644
index 0000000000..90c28bd487
--- /dev/null
+++ b/src/_impl/mod.rs
@@ -0,0 +1,68 @@
+use core::mem;
+
+pub use self::instant::Instant;
+pub use self::tq::{dispatch, NotReady, TimerQueue};
+pub use cortex_m::interrupt;
+use cortex_m::interrupt::Nr;
+pub use cortex_m::peripheral::syst::SystClkSource;
+use cortex_m::peripheral::{CBP, CPUID, DCB, DWT, FPB, FPU, ITM, MPU, NVIC, SCB, SYST, TPIU};
+use heapless::RingBuffer as Queue;
+pub use typenum::consts::*;
+pub use typenum::{Max, Maximum, Unsigned};
+
+mod instant;
+mod tq;
+
+pub type FreeQueue<N> = Queue<u8, N, u8>;
+pub type ReadyQueue<T, N> = Queue<(T, u8), N, u8>;
+
+#[cfg(feature = "timer-queue")]
+pub struct Peripherals<'a> {
+    pub CBP: CBP,
+    pub CPUID: CPUID,
+    pub DCB: DCB,
+    pub FPB: FPB,
+    pub FPU: FPU,
+    pub ITM: ITM,
+    pub MPU: MPU,
+    // pub NVIC: NVIC,
+    pub SCB: &'a mut SCB,
+    pub TPIU: TPIU,
+}
+
+#[cfg(not(feature = "timer-queue"))]
+pub struct Peripherals {
+    pub CBP: CBP,
+    pub CPUID: CPUID,
+    pub DCB: DCB,
+    pub DWT: DWT,
+    pub FPB: FPB,
+    pub FPU: FPU,
+    pub ITM: ITM,
+    pub MPU: MPU,
+    // pub NVIC: NVIC,
+    pub SCB: SCB,
+    pub SYST: SYST,
+    pub TPIU: TPIU,
+}
+
+pub fn trigger<I>(interrupt: I)
+where
+    I: Nr,
+{
+    unsafe { mem::transmute::<(), NVIC>(()).set_pending(interrupt) }
+}
+
+pub const unsafe fn uninitialized<T>() -> T {
+    #[allow(unions_with_drop_fields)]
+    union U<T> {
+        some: T,
+        none: (),
+    }
+
+    U { none: () }.some
+}
+
+pub unsafe fn steal() -> ::cortex_m::Peripherals {
+    ::cortex_m::Peripherals::steal()
+}
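For illustration (not part of the patch): `uninitialized` is a `const fn` stand-in for `core::mem::uninitialized`, which is what lets the macro emit uninitialized payload buffers as `static mut` items. A minimal sketch of that use, with a hypothetical `u32` payload type:

    // mirrors the PAYLOADS statics emitted by the macro earlier in this patch
    static mut PAYLOADS: [u32; 4] = unsafe { uninitialized() };
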
diff --git a/src/tq.rs b/src/_impl/tq.rs
similarity index 62%
rename from src/tq.rs
rename to src/_impl/tq.rs
index 19412fe961..6cb5392d44 100644
--- a/src/tq.rs
+++ b/src/_impl/tq.rs
@@ -5,48 +5,47 @@ use heapless::binary_heap::{BinaryHeap, Min};
 use heapless::ArrayLength;
 use typenum::{Max, Maximum, Unsigned};
 
-use instant::Instant;
-use resource::{Resource, Threshold};
+use _impl::Instant;
+use resource::{Priority, Resource};
 
-pub struct Message<T> {
-    pub baseline: Instant,
+pub struct NotReady<T> {
+    pub scheduled_time: Instant,
     pub index: u8,
     pub task: T,
 }
 
-impl<T> Eq for Message<T> {}
+impl<T> Eq for NotReady<T> {}
 
-impl<T> Ord for Message<T> {
-    fn cmp(&self, other: &Message<T>) -> Ordering {
-        self.baseline.cmp(&other.baseline)
+impl<T> Ord for NotReady<T> {
+    fn cmp(&self, other: &NotReady<T>) -> Ordering {
+        self.scheduled_time.cmp(&other.scheduled_time)
     }
 }
 
-impl<T> PartialEq for Message<T> {
-    fn eq(&self, other: &Message<T>) -> bool {
-        self.baseline == other.baseline
+impl<T> PartialEq for NotReady<T> {
+    fn eq(&self, other: &NotReady<T>) -> bool {
+        self.scheduled_time == other.scheduled_time
     }
 }
 
-impl<T> PartialOrd for Message<T> {
-    fn partial_cmp(&self, other: &Message<T>) -> Option<Ordering> {
+impl<T> PartialOrd for NotReady<T> {
+    fn partial_cmp(&self, other: &NotReady<T>) -> Option<Ordering> {
         Some(self.cmp(other))
     }
 }
 
-#[doc(hidden)]
 pub struct TimerQueue<T, N>
 where
-    N: ArrayLength<Message<T>>,
+    N: ArrayLength<NotReady<T>>,
     T: Copy,
 {
     pub syst: SYST,
-    pub queue: BinaryHeap<Message<T>, N, Min>,
+    pub queue: BinaryHeap<NotReady<T>, N, Min>,
 }
 
 impl<T, N> TimerQueue<T, N>
 where
-    N: ArrayLength<Message<T>>,
+    N: ArrayLength<NotReady<T>>,
     T: Copy,
 {
     pub const fn new(syst: SYST) -> Self {
@@ -57,13 +56,13 @@ where
     }
 
     #[inline]
-    pub unsafe fn enqueue(&mut self, m: Message<T>) {
+    pub unsafe fn enqueue(&mut self, m: NotReady<T>) {
         let mut is_empty = true;
         if self.queue
             .peek()
             .map(|head| {
                 is_empty = false;
-                m.baseline < head.baseline
+                m.scheduled_time < head.scheduled_time
             })
             .unwrap_or(true)
         {
@@ -79,22 +78,22 @@ where
     }
 }
 
-pub fn dispatch<T, TQ, N, F, P>(t: &mut Threshold<P>, tq: &mut TQ, mut f: F)
+pub fn dispatch<T, TQ, N, F, P>(t: &mut Priority<P>, tq: &mut TQ, mut f: F)
 where
-    F: FnMut(&mut Threshold<P>, T, u8),
-    Maximum<P, TQ::Ceiling>: Unsigned,
-    N: 'static + ArrayLength<Message<T>>,
-    P: Unsigned + Max<TQ::Ceiling>,
+    F: FnMut(&mut Priority<P>, T, u8),
+    N: 'static + ArrayLength<NotReady<T>>,
+    P: Max<TQ::Ceiling> + Unsigned,
     T: 'static + Copy + Send,
     TQ: Resource<Data = TimerQueue<T, N>>,
+    TQ::Ceiling: Unsigned,
 {
     loop {
         let next = tq.claim_mut(t, |tq, _| {
-            if let Some(bl) = tq.queue.peek().map(|p| p.baseline) {
-                let diff = bl - Instant::now();
+            if let Some(st) = tq.queue.peek().map(|p| p.scheduled_time) {
+                let diff = st - Instant::now();
 
                 if diff < 0 {
-                    // message ready
+                    // this entry became ready
                     let m = unsafe { tq.queue.pop_unchecked() };
 
                     Some((m.task, m.index))
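For illustration (not part of the patch): `NotReady` entries compare solely by `scheduled_time`, so the `Min` binary heap always surfaces the entry that becomes ready first, and `dispatch` pops entries while their scheduled time has already passed. A small ordering sketch with `u8` standing in for the generated task enum:

    fn ordering_demo() {
        let early = NotReady { scheduled_time: Instant(100), index: 0, task: 0u8 };
        let late  = NotReady { scheduled_time: Instant(200), index: 1, task: 1u8 };

        // Ord/PartialEq only look at `scheduled_time`, so the heap pops `early` first
        assert!(early < late);
    }
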
diff --git a/src/lib.rs b/src/lib.rs
index 08df42ba7c..e5716061bd 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -12,118 +12,37 @@ extern crate cortex_m_rtfm_macros;
 extern crate heapless;
 extern crate typenum;
 
-mod instant;
-mod resource;
-#[cfg(feature = "timer-queue")]
-mod tq;
-
 use core::mem;
 
-#[doc(hidden)]
-pub use cortex_m::interrupt;
-use cortex_m::interrupt::Nr;
-#[doc(hidden)]
-pub use cortex_m::peripheral::syst::SystClkSource;
-#[cfg(any(has_fpu, target_arch = "x86_64"))]
-use cortex_m::peripheral::FPU;
-use cortex_m::peripheral::{Peripherals, CPUID, DCB, DWT, MPU, NVIC, SCB, SYST};
-#[cfg(any(armv7m, target_arch = "x86_64"))]
-use cortex_m::peripheral::{CBP, FPB, ITM, TPIU};
+use cortex_m::interrupt::{self, Nr};
 pub use cortex_m_rtfm_macros::app;
 use heapless::ring_buffer::RingBuffer;
-pub use typenum::consts::*;
-pub use typenum::{Max, Maximum, Unsigned};
+use typenum::consts::*;
+use typenum::Unsigned;
 
-pub use instant::Instant;
-pub use resource::{Resource, Threshold};
-#[cfg(feature = "timer-queue")]
-pub use tq::{dispatch, Message, TimerQueue};
+pub use resource::{Priority, Resource};
 
-pub type PayloadQueue<T, N> = RingBuffer<(T, u8), N, u8>;
-pub type SlotQueue<N> = RingBuffer<u8, N, u8>;
-pub type Ceiling<R> = <R as Resource>::Ceiling;
+#[doc(hidden)]
+pub mod _impl;
+mod resource;
 
-pub struct Core {
-    #[cfg(any(armv7m, target_arch = "x86_64"))]
-    pub CBP: CBP,
-    pub CPUID: CPUID,
-    pub DCB: DCB,
-    // pub DWT: DWT,
-    #[cfg(any(armv7m, target_arch = "x86_64"))]
-    pub FPB: FPB,
-    #[cfg(any(has_fpu, target_arch = "x86_64"))]
-    pub FPU: FPU,
-    #[cfg(any(armv7m, target_arch = "x86_64"))]
-    pub ITM: ITM,
-    pub MPU: MPU,
-    pub SCB: SCB,
-    // pub SYST: SYST,
-    #[cfg(any(armv7m, target_arch = "x86_64"))]
-    pub TPIU: TPIU,
-}
-
-impl Core {
-    pub unsafe fn steal() -> (Core, DWT, NVIC, SYST) {
-        let p = Peripherals::steal();
-
-        (
-            Core {
-                #[cfg(any(armv7m, target_arch = "x86_64"))]
-                CBP: p.CBP,
-                CPUID: p.CPUID,
-                DCB: p.DCB,
-                #[cfg(any(armv7m, target_arch = "x86_64"))]
-                FPB: p.FPB,
-                #[cfg(any(has_fpu, target_arch = "x86_64"))]
-                FPU: p.FPU,
-                #[cfg(any(armv7m, target_arch = "x86_64"))]
-                ITM: p.ITM,
-                MPU: p.MPU,
-                SCB: p.SCB,
-                #[cfg(any(armv7m, target_arch = "x86_64"))]
-                TPIU: p.TPIU,
-            },
-            p.DWT,
-            p.NVIC,
-            p.SYST,
-        )
-    }
-}
-
-pub fn atomic<R, P, F>(t: &mut Threshold<P>, f: F) -> R
+/// TODO
+pub fn atomic<R, P, F>(t: &mut Priority<P>, f: F) -> R
 where
-    F: FnOnce(&mut Threshold<U255>) -> R,
+    F: FnOnce(&mut Priority<U255>) -> R,
     P: Unsigned,
 {
     unsafe {
+        // Sanity check
         debug_assert!(P::to_u8() <= 255);
 
         if P::to_u8() < 255 {
             interrupt::disable();
-            let r = f(&mut Threshold::new());
+            let r = f(&mut Priority::_new());
             interrupt::enable();
             r
         } else {
-            f(&mut Threshold::new())
+            f(&mut Priority::_new())
         }
     }
 }
-
-#[doc(hidden)]
-pub const unsafe fn uninitialized<T>() -> T {
-    #[allow(unions_with_drop_fields)]
-    union U<T> {
-        some: T,
-        none: (),
-    }
-
-    U { none: () }.some
-}
-
-#[doc(hidden)]
-pub unsafe fn set_pending<I>(interrupt: I)
-where
-    I: Nr,
-{
-    mem::transmute::<(), NVIC>(()).set_pending(interrupt)
-}
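For illustration (not part of the patch): `atomic` is the whole-system counterpart of `claim`: it disables interrupts (unless already at the maximum priority) and hands the closure a `Priority<U255>` token, so any resource can be accessed inside it; what must not happen is letting the borrow escape the closure, which is exactly what the `critical-section.rs` cfail test below checks. A hedged sketch of the allowed shape, assuming a shared `bool` resource `ON` as in that test:

    fn idle(mut ctxt: idle::Context) -> ! {
        let p = &mut ctxt.priority;
        let on = ctxt.resources.ON;

        // copying the value out is fine; returning `on.borrow(p)` itself would not compile
        let state: bool = rtfm::atomic(p, |p| *on.borrow(p));
        let _ = state;

        loop {}
    }
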
diff --git a/src/resource.rs b/src/resource.rs
index 67870cb7be..05b6f6d0c8 100644
--- a/src/resource.rs
+++ b/src/resource.rs
@@ -6,91 +6,97 @@ use cortex_m::register::basepri;
 use typenum::type_operators::IsGreaterOrEqual;
 use typenum::{Max, Maximum, True, Unsigned};
 
-pub struct Threshold<N>
-where
-    N: Unsigned,
-{
+/// TODO
+pub struct Priority<N> {
     _not_send_or_sync: PhantomData<*const ()>,
     _n: PhantomData<N>,
 }
 
-impl<N> Threshold<N>
-where
-    N: Unsigned,
-{
-    pub unsafe fn new() -> Self {
-        Threshold {
+impl<N> Priority<N> {
+    #[doc(hidden)]
+    pub unsafe fn _new() -> Self {
+        Priority {
             _not_send_or_sync: PhantomData,
             _n: PhantomData,
         }
     }
 }
 
+/// TODO
 pub unsafe trait Resource {
     #[doc(hidden)]
     const NVIC_PRIO_BITS: u8;
-    type Ceiling: Unsigned;
+
+    /// TODO
+    type Ceiling;
+
+    /// TODO
     type Data: 'static + Send;
 
+    // The `static mut` variable that the resource protects
     #[doc(hidden)]
-    unsafe fn get() -> &'static mut Self::Data;
+    unsafe fn _var() -> &'static mut Self::Data;
 
+    /// TODO
     #[inline(always)]
-    fn borrow<'cs, P>(&'cs self, _t: &'cs Threshold<P>) -> &'cs Self::Data
+    fn borrow<'cs, P>(&'cs self, _p: &'cs Priority<P>) -> &'cs Self::Data
     where
-        P: IsGreaterOrEqual<Self::Ceiling, Output = True> + Unsigned,
+        P: IsGreaterOrEqual<Self::Ceiling, Output = True>,
     {
-        unsafe { Self::get() }
+        unsafe { Self::_var() }
     }
 
+    /// TODO
     #[inline(always)]
-    fn borrow_mut<'cs, P>(&'cs mut self, _t: &'cs Threshold<P>) -> &'cs mut Self::Data
+    fn borrow_mut<'cs, P>(&'cs mut self, _p: &'cs Priority<P>) -> &'cs mut Self::Data
     where
-        P: IsGreaterOrEqual<Self::Ceiling, Output = True> + Unsigned,
+        P: IsGreaterOrEqual<Self::Ceiling, Output = True>,
     {
-        unsafe { Self::get() }
+        unsafe { Self::_var() }
     }
 
+    /// TODO
     #[inline(always)]
-    fn claim<'cs, R, F, P>(&self, _t: &mut Threshold<P>, f: F) -> R
+    fn claim<'cs, R, F, P>(&self, _p: &mut Priority<P>, f: F) -> R
     where
-        F: FnOnce(&Self::Data, &mut Threshold<Maximum<P, Self::Ceiling>>) -> R,
+        F: FnOnce(&Self::Data, &mut Priority<Maximum<P, Self::Ceiling>>) -> R,
         P: Max<Self::Ceiling> + Unsigned,
-        Maximum<P, Self::Ceiling>: Unsigned,
+        Self::Ceiling: Unsigned,
     {
         unsafe {
             if P::to_u8() >= Self::Ceiling::to_u8() {
-                f(Self::get(), &mut Threshold::new())
+                f(Self::_var(), &mut Priority::_new())
             } else {
                 let max = 1 << Self::NVIC_PRIO_BITS;
                 let new = (max - Self::Ceiling::to_u8()) << (8 - Self::NVIC_PRIO_BITS);
 
                 let old = basepri::read();
                 basepri::write(new);
-                let r = f(Self::get(), &mut Threshold::new());
+                let r = f(Self::_var(), &mut Priority::_new());
                 basepri::write(old);
                 r
             }
         }
     }
 
+    /// TODO
     #[inline(always)]
-    fn claim_mut<'cs, R, F, P>(&mut self, _t: &mut Threshold<P>, f: F) -> R
+    fn claim_mut<'cs, R, F, P>(&mut self, _p: &mut Priority<P>, f: F) -> R
     where
-        F: FnOnce(&mut Self::Data, &mut Threshold<Maximum<P, Self::Ceiling>>) -> R,
+        F: FnOnce(&mut Self::Data, &mut Priority<Maximum<P, Self::Ceiling>>) -> R,
         P: Max<Self::Ceiling> + Unsigned,
-        Maximum<P, Self::Ceiling>: Unsigned,
+        Self::Ceiling: Unsigned,
     {
         unsafe {
             if P::to_u8() >= Self::Ceiling::to_u8() {
-                f(Self::get(), &mut Threshold::new())
+                f(Self::_var(), &mut Priority::_new())
             } else {
                 let max = 1 << Self::NVIC_PRIO_BITS;
                 let new = (max - Self::Ceiling::to_u8()) << (8 - Self::NVIC_PRIO_BITS);
 
                 let old = basepri::read();
                 basepri::write(new);
-                let r = f(Self::get(), &mut Threshold::new());
+                let r = f(Self::_var(), &mut Priority::_new());
                 basepri::write(old);
                 r
             }
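For illustration (not part of the patch): `claim`/`claim_mut` only raise BASEPRI when the current priority is below the resource ceiling, and the value written is the ceiling translated into the hardware's left-aligned priority encoding. A worked example of that arithmetic for a device with 4 priority bits and a ceiling of 3:

    fn main() {
        const NVIC_PRIO_BITS: u8 = 4;
        let ceiling: u8 = 3;

        let max: u8 = 1 << NVIC_PRIO_BITS;                     // 16 logical levels
        let basepri = (max - ceiling) << (8 - NVIC_PRIO_BITS); // (16 - 3) << 4 = 0xD0
        assert_eq!(basepri, 0xD0);
    }
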
diff --git a/tests/cfail.rs b/tests/cfail.rs
index fdfbf7e6f2..bd6f7fb322 100644
--- a/tests/cfail.rs
+++ b/tests/cfail.rs
@@ -11,11 +11,13 @@ fn cfail() {
     config.mode = Mode::CompileFail;
     config.src_base = PathBuf::from(format!("tests/cfail"));
     config.target = "x86_64-unknown-linux-gnu".to_owned();
-    config.target_rustcflags =
-        Some("-C panic=abort \
-              -L target/debug/deps \
-              -L target/x86_64-unknown-linux-gnu/debug \
-              -L target/x86_64-unknown-linux-gnu/debug/deps ".to_string());
+    config.target_rustcflags = Some(
+        "-C panic=abort \
+         -L target/debug/deps \
+         -L target/x86_64-unknown-linux-gnu/debug \
+         -L target/x86_64-unknown-linux-gnu/debug/deps "
+            .to_string(),
+    );
 
     compiletest::run_tests(&config);
 }
diff --git a/tests/cfail/critical-section.rs b/tests/cfail/critical-section.rs
index 45cc4114cd..ca521a80a7 100644
--- a/tests/cfail/critical-section.rs
+++ b/tests/cfail/critical-section.rs
@@ -2,6 +2,7 @@
 #![deny(warnings)]
 #![feature(const_fn)]
 #![feature(proc_macro)]
+#![no_main]
 #![no_std]
 
 extern crate cortex_m_rtfm as rtfm;
@@ -34,15 +35,15 @@ fn init(_ctxt: init::Context) -> init::LateResources {
 }
 
 fn idle(mut ctxt: idle::Context) -> ! {
-    let t = &mut ctxt.threshold;
+    let p = &mut ctxt.priority;
     let on = ctxt.resources.ON;
 
-    let state = rtfm::atomic(t, |t| {
+    let state = rtfm::atomic(p, |p| {
         // ERROR borrow can't escape this *global* critical section
-        on.borrow(t) //~ error cannot infer an appropriate lifetime
+        on.borrow(p) //~ error cannot infer an appropriate lifetime
     });
 
-    let state = on.claim(t, |state, _t| {
+    let state = on.claim(p, |state, _p| {
         // ERROR borrow can't escape this critical section
         state //~ error cannot infer an appropriate lifetime
     });
diff --git a/tests/cfail/duplicated-task.rs b/tests/cfail/duplicated-task.rs
deleted file mode 100644
index 92d7afda12..0000000000
--- a/tests/cfail/duplicated-task.rs
+++ /dev/null
@@ -1,29 +0,0 @@
-#![deny(unsafe_code)]
-#![deny(warnings)]
-#![feature(proc_macro)]
-#![no_std]
-
-extern crate cortex_m_rtfm as rtfm;
-extern crate stm32f103xx;
-
-use rtfm::app;
-
-app! { //~ error proc macro panicked
-    device: stm32f103xx,
-
-    tasks: {
-        a: {
-            interrupt: EXTI0, //~ error this interrupt is already bound to another task
-            // priority: 1,
-        },
-
-        b: {
-            interrupt: EXTI0,
-            priority: 2,
-        },
-    },
-}
-
-fn init(_ctxt: init::Context) -> init::LateResources {}
-
-fn idle(_ctxt: idle::Context) -> ! {}
diff --git a/tests/cfail/exception.rs b/tests/cfail/exception.rs
index 4e27205069..2adb8dbec6 100644
--- a/tests/cfail/exception.rs
+++ b/tests/cfail/exception.rs
@@ -1,6 +1,7 @@
 #![deny(unsafe_code)]
 #![deny(warnings)]
 #![feature(proc_macro)]
+#![no_main]
 #![no_std]
 
 extern crate cortex_m_rtfm as rtfm;
diff --git a/tests/cfail/idle.rs b/tests/cfail/idle.rs
index 4011bd2e54..2ccfda53f2 100644
--- a/tests/cfail/idle.rs
+++ b/tests/cfail/idle.rs
@@ -1,6 +1,7 @@
 #![deny(unsafe_code)]
 #![deny(warnings)]
 #![feature(proc_macro)]
+#![no_main]
 #![no_std]
 
 extern crate cortex_m_rtfm as rtfm;
diff --git a/tests/cfail/init-resource-share-idle.rs b/tests/cfail/init-resource-share-idle.rs
index 811eea77ae..f795b0a800 100644
--- a/tests/cfail/init-resource-share-idle.rs
+++ b/tests/cfail/init-resource-share-idle.rs
@@ -1,5 +1,6 @@
 #![deny(warnings)]
 #![feature(proc_macro)]
+#![no_main]
 #![no_std]
 
 extern crate cortex_m_rtfm as rtfm;
diff --git a/tests/cfail/init-resource-share-task.rs b/tests/cfail/init-resource-share-task.rs
index c5730d5aeb..a8db75a407 100644
--- a/tests/cfail/init-resource-share-task.rs
+++ b/tests/cfail/init-resource-share-task.rs
@@ -1,6 +1,7 @@
 #![deny(unsafe_code)]
 #![deny(warnings)]
 #![feature(proc_macro)]
+#![no_main]
 #![no_std]
 
 extern crate cortex_m_rtfm as rtfm;
diff --git a/tests/cfail/init.rs b/tests/cfail/init.rs
index d195049d79..aab1c06ed4 100644
--- a/tests/cfail/init.rs
+++ b/tests/cfail/init.rs
@@ -1,6 +1,7 @@
 #![deny(unsafe_code)]
 #![deny(warnings)]
 #![feature(proc_macro)]
+#![no_main]
 #![no_std]
 
 extern crate cortex_m_rtfm as rtfm;
diff --git a/tests/cfail/interrupt.rs b/tests/cfail/interrupt.rs
index 3df3ffe073..d481fe512b 100644
--- a/tests/cfail/interrupt.rs
+++ b/tests/cfail/interrupt.rs
@@ -1,6 +1,7 @@
 #![deny(unsafe_code)]
 #![deny(warnings)]
 #![feature(proc_macro)]
+#![no_main]
 #![no_std]
 
 extern crate cortex_m_rtfm as rtfm;
diff --git a/tests/cfail/late-resource-init.rs b/tests/cfail/late-resource-init.rs
index df2dcff053..795399aede 100644
--- a/tests/cfail/late-resource-init.rs
+++ b/tests/cfail/late-resource-init.rs
@@ -1,6 +1,7 @@
 #![deny(unsafe_code)]
 #![deny(warnings)]
 #![feature(proc_macro)]
+#![no_main]
 #![no_std]
 
 extern crate cortex_m_rtfm as rtfm;
diff --git a/tests/cfail/lock.rs b/tests/cfail/lock.rs
index 1a650bd412..7706bd173a 100644
--- a/tests/cfail/lock.rs
+++ b/tests/cfail/lock.rs
@@ -2,6 +2,7 @@
 #![deny(warnings)]
 #![feature(const_fn)]
 #![feature(proc_macro)]
+#![no_main]
 #![no_std]
 
 extern crate cortex_m_rtfm as rtfm;
@@ -51,19 +52,19 @@ fn idle(_ctxt: idle::Context) -> ! {
 #[allow(non_snake_case)]
 fn exti0(mut ctxt: exti0::Context) {
     let exti0::Resources { ON, mut MAX } = ctxt.resources;
-    let t = &mut ctxt.threshold;
+    let p = &mut ctxt.priority;
 
     // ERROR need to lock to access the resource because priority < ceiling
     {
-        let _on = ON.borrow(t);
+        let _on = ON.borrow(p);
         //~^ error type mismatch resolving
     }
 
     // OK need to lock to access the resource
-    if ON.claim(t, |on, _| *on) {}
+    if ON.claim(p, |on, _| *on) {}
 
     // OK can claim a resource with maximum ceiling
-    MAX.claim_mut(t, |max, _| *max += 1);
+    MAX.claim_mut(p, |max, _| *max += 1);
 }
 
 #[allow(non_snake_case)]
diff --git a/tests/cfail/priority-too-high.rs b/tests/cfail/priority-too-high.rs
index bab21ecfd2..5002f0b9f3 100644
--- a/tests/cfail/priority-too-high.rs
+++ b/tests/cfail/priority-too-high.rs
@@ -1,4 +1,5 @@
 #![feature(proc_macro)]
+#![no_main]
 #![no_std]
 
 extern crate cortex_m_rtfm as rtfm;
diff --git a/tests/cfail/priority-too-low.rs b/tests/cfail/priority-too-low.rs
index 7010ff2452..369b66dcbe 100644
--- a/tests/cfail/priority-too-low.rs
+++ b/tests/cfail/priority-too-low.rs
@@ -1,6 +1,7 @@
 #![deny(unsafe_code)]
 #![deny(warnings)]
 #![feature(proc_macro)]
+#![no_main]
 #![no_std]
 
 extern crate cortex_m_rtfm as rtfm;
diff --git a/tests/cfail/resource-alias.rs b/tests/cfail/resource-alias.rs
index 84aaea4e1d..44a6f10153 100644
--- a/tests/cfail/resource-alias.rs
+++ b/tests/cfail/resource-alias.rs
@@ -1,6 +1,7 @@
 #![deny(unsafe_code)]
 #![deny(warnings)]
 #![feature(proc_macro)]
+#![no_main]
 #![no_std]
 
 extern crate cortex_m_rtfm as rtfm;
diff --git a/tests/cfail/resource-not-send-sync.rs b/tests/cfail/resource-not-send-sync.rs
index bb3c9859eb..0e550daca5 100644
--- a/tests/cfail/resource-not-send-sync.rs
+++ b/tests/cfail/resource-not-send-sync.rs
@@ -1,6 +1,7 @@
 #![deny(unsafe_code)]
 #![deny(warnings)]
 #![feature(proc_macro)]
+#![no_main]
 #![no_std]
 
 extern crate cortex_m_rtfm as rtfm;
diff --git a/tests/cfail/token-outlive.rs b/tests/cfail/token-outlive.rs
index f354594f4c..74a066d89c 100644
--- a/tests/cfail/token-outlive.rs
+++ b/tests/cfail/token-outlive.rs
@@ -1,6 +1,7 @@
 #![deny(unsafe_code)]
 #![deny(warnings)]
 #![feature(proc_macro)]
+#![no_main]
 #![no_std]
 
 extern crate cortex_m_rtfm as rtfm;
@@ -41,8 +42,8 @@ fn idle(_ctxt: idle::Context) -> ! {
 
 fn exti0(ctxt: exti0::Context) {
     // ERROR token should not outlive the critical section
-    let t = &mut ctxt.threshold;
-    let t = ctxt.resources.STATE.claim(t, |_state, t| t);
+    let op = &mut ctxt.priority;
+    let p = ctxt.resources.STATE.claim(op, |_state, ip| ip);
     //~^ error cannot infer an appropriate lifetime
 }
 
diff --git a/tests/cfail/token-transfer.rs b/tests/cfail/token-transfer.rs
index 92e5d89164..adac222f11 100644
--- a/tests/cfail/token-transfer.rs
+++ b/tests/cfail/token-transfer.rs
@@ -2,6 +2,7 @@
 #![deny(warnings)]
 #![feature(const_fn)]
 #![feature(proc_macro)]
+#![no_main]
 #![no_std]
 
 extern crate cortex_m_rtfm as rtfm;
@@ -9,14 +10,14 @@ extern crate panic_itm;
 extern crate stm32f103xx;
 extern crate typenum;
 
-use rtfm::{app, Threshold};
+use rtfm::{app, Priority};
 use typenum::consts::U1;
 
 app! { //~ error bound `*const (): core::marker::Send` is not satisfied
     device: stm32f103xx,
 
     resources: {
-        static TOKEN: Option<Threshold<U1>> = None;
+        static TOKEN: Option<Priority<U1>> = None;
     },
 
     idle: {
@@ -31,7 +32,9 @@ app! { //~ error bound `*const (): core::marker::Send` is not satisfied
     }
 }
 
-fn init(_ctxt: init::Context) {}
+fn init(_ctxt: init::Context) -> init::LateResources {
+    init::LateResources {}
+}
 
 fn idle(_ctxt: idle::Context) -> ! {
     loop {}
diff --git a/tests/cfail/wrong-threshold.rs b/tests/cfail/wrong-threshold.rs
index 2346e5fecb..1343c0f0ba 100644
--- a/tests/cfail/wrong-threshold.rs
+++ b/tests/cfail/wrong-threshold.rs
@@ -1,6 +1,7 @@
 #![deny(unsafe_code)]
 #![deny(warnings)]
 #![feature(proc_macro)]
+#![no_main]
 #![no_std]
 
 extern crate cortex_m_rtfm as rtfm;
@@ -41,13 +42,13 @@ fn idle(_ctxt: idle::Context) -> ! {
 }
 
 fn exti0(mut ctxt: exti0::Context) {
-    let ot = &mut ctxt.threshold;
+    let op = &mut ctxt.priority;
     let exti0::Resources { A, B } = ctxt.resources;
 
-    A.claim(ot, |_a, _it| {
-        //~^ error closure requires unique access to `ot` but `*ot` is already borrowed
-        // ERROR must use inner token `it` instead of the outer one (`ot`)
-        B.claim(ot, |_b, _| {})
+    A.claim(op, |_a, _ip| {
+        //~^ error closure requires unique access to `op` but `*op` is already borrowed
+        // ERROR must use inner token `_ip` instead of the outer one (`op`)
+        B.claim(op, |_b, _| {})
     });
 }