Disable the playground on all of these

datdenkikniet 2023-04-23 15:33:56 +02:00
parent 0807aa548c
commit a66540efa0
30 changed files with 103 additions and 103 deletions
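The change is mechanical: every affected Rust code fence in the book sources gains mdBook's `noplayground` attribute, which removes the playground "run" button from the rendered block (these embedded-target examples could not be built or run on the Rust Playground anyway). Nearly every hunk below is the same one-line edit to a fence:

-``` rust
+``` rust,noplayground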

@@ -34,7 +34,7 @@ This activates the monotonics making it possible to use them.
See the following example:
-``` rust
+``` rust,noplayground
{{#include ../../../../examples/schedule.rs}}
```
@@ -54,7 +54,7 @@ which allows canceling or rescheduling of the task scheduled to run in the futur
If `cancel` or `reschedule_at`/`reschedule_after` returns an `Err` it means that the operation was
too late and that the task is already sent for execution. The following example shows this in action:
-``` rust
+``` rust,noplayground
{{#include ../../../../examples/cancel-reschedule.rs}}
```
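As a rough sketch of the API these hunks document (this is not the included `cancel-reschedule.rs` example; the task names and durations are made up, and the snippet assumes an RTIC 1.x app with a default monotonic and fugit's duration extension trait in scope):

```rust
// Hedged sketch: `spawn_after` returns a SpawnHandle that can be cancelled or
// rescheduled as long as the task has not yet been dispatched.
#[task]
fn caller(_: caller::Context) {
    // Run `beep` one second from now.
    let handle = beep::spawn_after(1.secs()).unwrap();

    // Push the deadline back; this consumes the old handle and returns Err
    // if `beep` was already sent for execution.
    let handle = handle.reschedule_after(1.secs()).unwrap();

    // Cancelling also consumes the handle; Err again means we were too late.
    if handle.cancel().is_err() {
        // `beep` will still run
    }
}

#[task]
fn beep(_: beep::Context) {}
```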

@@ -27,7 +27,7 @@ cortex-m-rtic = "0.5.3"
The only code change that needs to be made is that any reference to `rtfm` now needs to point
to `rtic`, as follows:
-``` rust
+``` rust,noplayground
//
// Change this
//

@@ -42,7 +42,7 @@ framework: `resources`, `spawn`, `schedule` -- these variables will become
fields of the `Context` structure. Each function within the `#[rtfm::app]` item
gets a different `Context` type.
-``` rust
+``` rust,noplayground
#[rtfm::app(/* .. */)]
const APP: () = {
// change this
@@ -90,7 +90,7 @@ const APP: () = {
The syntax used to declare resources has changed from `static mut`
variables to a `struct Resources`.
-``` rust
+``` rust,noplayground
#[rtfm::app(/* .. */)]
const APP: () = {
// change this
@@ -118,7 +118,7 @@ the `device` field of the `init::Context` structure.
Change this:
-``` rust
+``` rust,noplayground
#[rtfm::app(/* .. */)]
const APP: () = {
#[init]
@@ -132,7 +132,7 @@ const APP: () = {
Into this:
-``` rust
+``` rust,noplayground
#[rtfm::app(/* .. */, peripherals = true)]
// ^^^^^^^^^^^^^^^^^^
const APP: () = {
@@ -155,7 +155,7 @@ attribute with the `binds` argument instead.
Change this:
-``` rust
+``` rust,noplayground
#[rtfm::app(/* .. */)]
const APP: () = {
// hardware tasks
@@ -175,7 +175,7 @@ const APP: () = {
Into this:
-``` rust
+``` rust,noplayground
#[rtfm::app(/* .. */)]
const APP: () = {
#[task(binds = SVCall)]
@@ -212,7 +212,7 @@ ensure it is enabled by the application inside `init`.
Change this:
-``` rust
+``` rust,noplayground
use rtfm::{Duration, Instant, U32Ext};
#[rtfm::app(/* .. */)]
@@ -226,7 +226,7 @@ const APP: () = {
Into this:
-``` rust
+``` rust,noplayground
use rtfm::cyccnt::{Duration, Instant, U32Ext};
// ^^^^^^^^

@@ -12,7 +12,7 @@ With the support of attributes on modules the `const APP` workaround is not need
Change
-``` rust
+``` rust,noplayground
#[rtic::app(/* .. */)]
const APP: () = {
[code here]
@@ -21,7 +21,7 @@ const APP: () = {
into
-``` rust
+``` rust,noplayground
#[rtic::app(/* .. */)]
mod app {
[code here]
@@ -75,7 +75,7 @@ mod app {
Change
-``` rust
+``` rust,noplayground
#[rtic::app(/* .. */)]
const APP: () = {
[code here]
@@ -92,7 +92,7 @@ const APP: () = {
into
-``` rust
+``` rust,noplayground
#[rtic::app(/* .. */, dispatchers = [SSI0, QEI0])]
mod app {
[code here]
@@ -106,7 +106,7 @@ This works also for ram functions, see examples/ramfunc.rs
Previously the RTIC resources had to be in a struct named exactly "Resources":
-``` rust
+``` rust,noplayground
struct Resources {
// Resources defined in here
}
@@ -115,7 +115,7 @@ struct Resources {
With RTIC v1.0.0 the resources structs are annotated similarly to
`#[task]`, `#[init]`, `#[idle]`: with the attributes `#[shared]` and `#[local]`
-``` rust
+``` rust,noplayground
#[shared]
struct MySharedResources {
// Resources shared between tasks are defined here
@@ -136,7 +136,7 @@ In v1.0.0 resources are split between `shared` resources and `local` resources.
In v0.5.x:
-``` rust
+``` rust,noplayground
struct Resources {
local_to_b: i64,
shared_by_a_and_b: i64,
@@ -151,7 +151,7 @@ fn b(_: b::Context) {}
In v1.0.0:
-``` rust
+``` rust,noplayground
#[shared]
struct Shared {
shared_by_a_and_b: i64,
@@ -176,7 +176,7 @@ to be used for all `shared` resource access.
In old code one could do the following as the high priority
task has exclusive access to the resource:
-``` rust
+``` rust,noplayground
#[task(priority = 2, resources = [r])]
fn foo(cx: foo::Context) {
cx.resources.r = /* ... */;
@@ -190,7 +190,7 @@ fn bar(cx: bar::Context) {
And with symmetric locks one needs to use locks in both tasks:
-``` rust
+``` rust,noplayground
#[task(priority = 2, shared = [r])]
fn foo(cx: foo::Context) {
cx.shared.r.lock(|r| r = /* ... */);
@@ -211,7 +211,7 @@ This is still possible in 1.0: the `#[shared]` resource must be annotated with t
v0.5 code:
-``` rust
+``` rust,noplayground
struct Resources {
counter: u64,
}
@@ -229,7 +229,7 @@ fn b(cx: b::Context) {
v1.0 code:
-``` rust
+``` rust,noplayground
#[shared]
struct Shared {
#[lock_free]
@@ -254,7 +254,7 @@ Instead of that syntax, use the `local` argument in `#[init]`.
v0.5.x code:
-``` rust
+``` rust,noplayground
#[init]
fn init(_: init::Context) {
static mut BUFFER: [u8; 1024] = [0; 1024];
@@ -264,7 +264,7 @@ fn init(_: init::Context) {
v1.0.0 code:
-``` rust
+``` rust,noplayground
#[init(local = [
buffer: [u8; 1024] = [0; 1024]
// type ^^^^^^^^^^^^ ^^^^^^^^^ initial value
@@ -282,7 +282,7 @@ In order to make the API more symmetric the #[init]-task always returns a late r
From this:
-``` rust
+``` rust,noplayground
#[rtic::app(device = lm3s6965)]
const APP: () = {
#[init]
@@ -296,7 +296,7 @@ const APP: () = {
to this:
-``` rust
+``` rust,noplayground
#[rtic::app(device = lm3s6965)]
mod app {
#[shared]
@@ -321,7 +321,7 @@ mod app {
With the new spawn/spawn_after/spawn_at interface,
old code requiring the context `cx` for spawning such as:
-``` rust
+``` rust,noplayground
#[task(spawn = [bar])]
fn foo(cx: foo::Context) {
cx.spawn.bar().unwrap();
@@ -335,7 +335,7 @@ fn bar(cx: bar::Context) {
Will now be written as:
-``` rust
+``` rust,noplayground
#[task]
fn foo(_c: foo::Context) {
bar::spawn().unwrap();

@@ -27,6 +27,6 @@ Overall, the generated code infers no additional overhead in comparison to a han
To give a flavour of RTIC, the following example contains commonly used features.
In the following sections we will go through each feature in detail.
-``` rust
+``` rust,noplayground
{{#include ../../../../rtic/examples/common.rs}}
```

@@ -11,7 +11,7 @@ Like in `init`, locally declared resources will have `'static` lifetimes that ar
The example below shows that `idle` runs after `init`.
-``` rust
+``` rust,noplayground
{{#include ../../../../rtic/examples/idle.rs}}
```
@@ -38,7 +38,7 @@ The following example shows how to enable sleep by setting the
[WFI]: https://developer.arm.com/documentation/dui0662/b/The-Cortex-M0--Instruction-Set/Miscellaneous-instructions/WFI
[NOP]: https://developer.arm.com/documentation/dui0662/b/The-Cortex-M0--Instruction-Set/Miscellaneous-instructions/NOP
-``` rust
+``` rust,noplayground
{{#include ../../../../rtic/examples/idle-wfi.rs}}
```

@@ -16,7 +16,7 @@ The example below shows the types of the `core`, `device` and `cs` fields, and s
The `device` field is only available when the `peripherals` argument is set to the default value `true`.
In the rare case you want to implement an ultra-slim application you can explicitly set `peripherals` to `false`.
-``` rust
+``` rust,noplayground
{{#include ../../../../rtic/examples/init.rs}}
```

@@ -2,7 +2,7 @@
This is the smallest possible RTIC application:
-``` rust
+``` rust,noplayground
{{#include ../../../../rtic/examples/smallest.rs}}
```

@@ -33,7 +33,7 @@ Task Priority
The following example showcases the priority based scheduling of tasks:
-``` rust
+``` rust,noplayground
{{#include ../../../../rtic/examples/preempt.rs}}
```

@@ -2,7 +2,7 @@
Channels can be used to communicate data between running tasks. The channel is essentially a wait queue, allowing multiple producer tasks and a single receiver task. A channel is constructed in the `init` task and backed by statically allocated memory. Send and receive endpoints are distributed to *software* tasks:
-``` rust
+``` rust,noplayground
...
const CAPACITY: usize = 5;
#[init]
@@ -22,7 +22,7 @@ Channels can also be used from *hardware* tasks, but only in a non-`async` manne
The `send` method posts a message on the channel as shown below:
-``` rust
+``` rust,noplayground
#[task]
async fn sender1(_c: sender1::Context, mut sender: Sender<'static, u32, CAPACITY>) {
hprintln!("Sender 1 sending: 1");
@@ -34,7 +34,7 @@ async fn sender1(_c: sender1::Context, mut sender: Sender<'static, u32, CAPACITY
The receiver can `await` incoming messages:
-``` rust
+``` rust,noplayground
#[task]
async fn receiver(_c: receiver::Context, mut receiver: Receiver<'static, u32, CAPACITY>) {
while let Ok(val) = receiver.recv().await {
@@ -48,7 +48,7 @@ Channels are implemented using a small (global) *Critical Section* (CS) for prot
For a complete example:
-``` rust
+``` rust,noplayground
{{#include ../../../../rtic/examples/async-channel.rs}}
```
@@ -64,7 +64,7 @@ Also sender endpoint can be awaited. In case the channel capacity has not yet be
In the following example the `CAPACITY` has been reduced to 1, forcing sender tasks to wait until the data in the channel has been received.
-``` rust
+``` rust,noplayground
{{#include ../../../../rtic/examples/async-channel-done.rs}}
```
@@ -81,7 +81,7 @@ $ cargo run --target thumbv7m-none-eabi --example async-channel-done --features
If all senders have been dropped, `await`-ing on an empty receiver channel results in an error. This makes it possible to gracefully implement different types of shutdown operations.
-``` rust
+``` rust,noplayground
{{#include ../../../../rtic/examples/async-channel-no-sender.rs}}
```
@@ -97,7 +97,7 @@ Similarly, `await`-ing on a send channel results in an error in case the receive
The resulting error returns the data back to the sender, allowing the sender to take appropriate action (e.g., storing the data to later retry sending it).
-``` rust
+``` rust,noplayground
{{#include ../../../../rtic/examples/async-channel-no-receiver.rs}}
```
@@ -115,7 +115,7 @@ Using the Try API, you can send or receive data from or to a channel without req
This API is exposed through `Receiver::try_recv` and `Sender::try_send`.
-``` rust
+``` rust,noplayground
{{#include ../../../../rtic/examples/async-channel-try.rs}}
```
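The `init`-side construction is elided ("...") in the hunks above. A condensed sketch of that part, assuming the `rtic-sync` crate used by the RTIC 2 channel examples (task names here are illustrative, not taken from `async-channel.rs`):

```rust
// Hedged sketch, not the book's example.
use rtic_sync::{channel::{Receiver, Sender}, make_channel};

const CAPACITY: usize = 5;

#[init]
fn init(_: init::Context) -> (Shared, Local) {
    // The channel is backed by statically allocated storage.
    let (sender, receiver) = make_channel!(u32, CAPACITY);
    // Distribute the endpoints: senders can be cloned for multiple producers,
    // the single receiver cannot.
    receiver_task::spawn(receiver).unwrap();
    sender_task1::spawn(sender.clone()).unwrap();
    sender_task2::spawn(sender).unwrap();
    (Shared {}, Local {})
}
```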

@@ -7,7 +7,7 @@ This can be achieved by instantiating a monotonic timer (for implementations, se
[`rtic-monotonics`]: https://github.com/rtic-rs/rtic/tree/master/rtic-monotonics
[`rtic-time`]: https://github.com/rtic-rs/rtic/tree/master/rtic-time
-``` rust
+``` rust,noplayground
...
{{#include ../../../../rtic/examples/async-timeout.rs:init}}
...
@@ -15,7 +15,7 @@ This can be achieved by instantiating a monotonic timer (for implementations, se
A *software* task can `await` the delay to expire:
-``` rust
+``` rust,noplayground
#[task]
async fn foo(_cx: foo::Context) {
...
@@ -34,7 +34,7 @@ Similarly the channels implementation, the timer-queue implementation relies on
<details>
<summary>A complete example</summary>
-``` rust
+``` rust,noplayground
{{#include ../../../../rtic/examples/async-delay.rs}}
```
@@ -58,7 +58,7 @@ A common use case is transactions with an associated timeout. In the examples sh
Using the `select_biased` macro from the `futures` crate it may look like this:
-``` rust,noplayground
+``` rust,noplayground,noplayground
{{#include ../../../../rtic/examples/async-timeout.rs:select_biased}}
```
@@ -70,7 +70,7 @@ Using `select_biased` any number of futures can be combined, so its very powerfu
Rewriting the second example from above using `timeout_after` gives:
-``` rust
+``` rust,noplayground
{{#include ../../../../rtic/examples/async-timeout.rs:timeout_at_basic}}
```
@@ -78,7 +78,7 @@ In cases where you want exact control over time without drift we can use exact p
[fugit]: https://crates.io/crates/fugit
-``` rust
+``` rust,noplayground
{{#include ../../../../rtic/examples/async-timeout.rs:timeout_at}}
@@ -99,7 +99,7 @@ For the third iteration, with `n == 2`, `hal_get` will take 550ms to finish, in
<details>
<summary>A complete example</summary>
-``` rust
+``` rust,noplayground
{{#include ../../../../rtic/examples/async-timeout.rs}}
```
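For the `timeout_after` variant referred to above, the shape of the call is roughly the following (a sketch only: `Mono` stands for whatever monotonic the application defines, and `hal_get` is the operation from the surrounding example):

```rust
// Hedged sketch: wrap a future with a deadline relative to "now".
match Mono::timeout_after(1.secs(), hal_get(n)).await {
    Ok(_value) => { /* hal_get completed within one second */ }
    Err(_) => { /* the deadline expired first */ }
}
```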

@@ -19,7 +19,7 @@ Beware of using interrupt vectors that are used internally by hardware features;
The example below demonstrates the use of the `#[task(binds = InterruptName)]` attribute to declare a hardware task bound to an interrupt handler.
-``` rust
+``` rust,noplayground
{{#include ../../../../rtic/examples/hardware.rs}}
```

@@ -10,7 +10,7 @@ pending spawns of `foo`. Exceeding this capacity is an `Error`.
The number of arguments to a task is not limited:
-``` rust
+``` rust,noplayground
{{#include ../../../../examples/message_passing.rs}}
```

@@ -25,7 +25,7 @@ Types of `#[local]` resources must implement a [`Send`] trait as they are being
The example application shown below contains three tasks `foo`, `bar` and `idle`, each having access to its own `#[local]` resource.
-``` rust
+``` rust,noplayground
{{#include ../../../../rtic/examples/locals.rs}}
```
@@ -51,7 +51,7 @@ Types of `#[task(local = [..])]` resources have to be neither [`Send`] nor [`Syn
In the example below the different uses and lifetimes are shown:
-``` rust
+``` rust,noplayground
{{#include ../../../../rtic/examples/declared_locals.rs}}
```
@@ -76,7 +76,7 @@ The critical section created by the `lock` API is based on dynamic priorities: i
In the example below we have three interrupt handlers with priorities ranging from one to three. The two handlers with the lower priorities contend for a `shared` resource and need to succeed in locking the resource in order to access its data. The highest priority handler, which does not access the `shared` resource, is free to preempt a critical section created by the lowest priority handler.
-``` rust
+``` rust,noplayground
{{#include ../../../../rtic/examples/lock.rs}}
```
@@ -94,7 +94,7 @@ Types of `#[shared]` resources have to be [`Send`].
As an extension to `lock`, and to reduce rightward drift, locks can be taken as tuples. The following examples show this in use:
-``` rust
+``` rust,noplayground
{{#include ../../../../rtic/examples/multilock.rs}}
```
@@ -116,7 +116,7 @@ Note that in this release of RTIC it is not possible to request both exclusive a
In the example below a key (e.g. a cryptographic key) is loaded (or created) at runtime (returned by `init`) and then used from two tasks that run at different priorities without any kind of lock.
-``` rust
+``` rust,noplayground
{{#include ../../../../rtic/examples/only-shared-access.rs}}
```
@@ -142,7 +142,7 @@ To adhere to the Rust [aliasing] rule, a resource may be either accessed through
Using `#[lock_free]` on resources shared by tasks running at different priorities will result in a *compile-time* error -- not using the `lock` API would violate the aforementioned alias rule. Similarly, for each priority there can be only a single *software* task accessing a shared resource (as an `async` task may yield execution to other *software* or *hardware* tasks running at the same priority). However, under this single-task restriction, we make the observation that the resource is in effect no longer `shared` but rather `local`. Thus, using a `#[lock_free]` shared resource will result in a *compile-time* error -- where applicable, use a `#[local]` resource instead.
-``` rust
+``` rust,noplayground
{{#include ../../../../rtic/examples/lock-free.rs}}
```
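The tuple form of `lock` mentioned in the multi-lock hunk above looks roughly like this (a sketch; `a`, `b` and `foo` are illustrative names, not taken from `multilock.rs`, and the tuple `lock` comes from RTIC's Mutex implementation for tuples):

```rust
// Hedged sketch of locking several `#[shared]` resources at once;
// `a` and `b` are assumed to be integer shared resources of the task `foo`.
#[task(shared = [a, b])]
async fn foo(cx: foo::Context) {
    (cx.shared.a, cx.shared.b).lock(|a, b| {
        // Both resources stay locked for the whole closure, at the ceiling
        // priority of the combined set, avoiding nested rightward drift.
        *a += 1;
        *b += 1;
    });
}
```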

@@ -23,7 +23,7 @@ The framework will give a compilation error if there are not enough dispatchers
See the following example:
-``` rust
+``` rust,noplayground
{{#include ../../../../rtic/examples/spawn.rs}}
```
@@ -40,7 +40,7 @@ In the below example, we `spawn` the *software* task `foo` from the `idle` task.
Technically the async executor will `poll` the `foo` *future* which in this case leaves the *future* in a *completed* state.
-``` rust
+``` rust,noplayground
{{#include ../../../../rtic/examples/spawn_loop.rs}}
```
@@ -56,7 +56,7 @@ An attempt to `spawn` an already spawned task (running) task will result in an e
Technically, a `spawn` to a *future* that is not in *completed* state is considered an error.
-``` rust
+``` rust,noplayground
{{#include ../../../../rtic/examples/spawn_err.rs}}
```
@@ -71,7 +71,7 @@ $ cargo run --target thumbv7m-none-eabi --example spawn_err
## Passing arguments
You can also pass arguments at spawn as follows.
-``` rust
+``` rust,noplayground
{{#include ../../../../rtic/examples/spawn_arguments.rs}}
```
@@ -92,7 +92,7 @@ Conceptually, one can see such tasks as running in the `main` thread of the appl
[Send]: https://doc.rust-lang.org/nomicon/send-and-sync.html
-``` rust
+``` rust,noplayground
{{#include ../../../../rtic/examples/zero-prio-task.rs}}
```

@@ -3,7 +3,7 @@
Destructuring task resources might help readability if a task takes multiple
resources. Here are two examples of how to split up the resource struct:
-``` rust
+``` rust,noplayground
{{#include ../../../../../rtic/examples/destructure.rs}}
```

@@ -11,7 +11,7 @@ improve performance in some cases.
The example below shows how to place the higher priority task, `bar`, in RAM.
-``` rust
+``` rust,noplayground
{{#include ../../../../../rtic/examples/ramfunc.rs}}
```

@@ -13,7 +13,7 @@ As this example of approach goes completely outside of RTIC resource model with
Here's an example where `heapless::Pool` is used to "box" buffers of 128 bytes.
-``` rust
+``` rust,noplayground
{{#include ../../../../../rtic/examples/pool.rs}}
```

@@ -8,7 +8,7 @@ In the following example two different tasks share a [`heapless::spsc::Queue`] f
[`heapless::spsc::Queue`]: https://docs.rs/heapless/0.7.5/heapless/spsc/struct.Queue.html
-``` rust
+``` rust,noplayground
{{#include ../../../../../rtic/examples/static.rs}}
```

@@ -16,7 +16,7 @@ $ rustfmt target/rtic-expansion.rs
$ tail target/rtic-expansion.rs
```
-``` rust
+``` rust,noplayground
#[doc = r" Implementation details"]
mod app {
#[doc = r" Always include the device crate which contains the vector table"]

@@ -27,7 +27,7 @@ section on [critical sections](critical-sections.html)).
The code below is an example of the kind of source level transformation that
happens behind the scenes:
-``` rust
+``` rust,noplayground
#[rtic::app(device = ..)]
mod app {
static mut X: u64 = 0;
@@ -54,7 +54,7 @@ mod app {
The framework produces code like this:
-``` rust
+``` rust,noplayground
fn init(c: init::Context) {
// .. user code ..
}

@@ -26,7 +26,7 @@ gets a unique reference (`&mut-`) to resources.
An example to illustrate the ceiling analysis:
-``` rust
+``` rust,noplayground
#[rtic::app(device = ..)]
mod app {
struct Resources {

@@ -30,7 +30,7 @@ task we give it a *resource proxy*, whereas we give a unique reference
The example below shows the different types handed out to each task:
-``` rust
+``` rust,noplayground
#[rtic::app(device = ..)]
mut app {
struct Resources {
@@ -62,7 +62,7 @@ mut app {
Now let's see how these types are created by the framework.
-``` rust
+``` rust,noplayground
fn foo(c: foo::Context) {
// .. user code ..
}
@@ -149,7 +149,7 @@ The semantics of the `BASEPRI` register are as follows:
Thus the dynamic priority at any point in time can be computed as
-``` rust
+``` rust,noplayground
dynamic_priority = max(hw2logical(BASEPRI), hw2logical(static_priority))
```
@@ -160,7 +160,7 @@ In this particular example we could implement the critical section as follows:
> **NOTE:** this is a simplified implementation
-``` rust
+``` rust,noplayground
impl rtic::Mutex for resources::x {
type T = u64;
@@ -194,7 +194,7 @@ calls to it. This is required for memory safety, as nested calls would produce
multiple unique references (`&mut-`) to `x` breaking Rust aliasing rules. See
below:
-``` rust
+``` rust,noplayground
#[interrupt(binds = UART0, priority = 1, resources = [x])]
fn foo(c: foo::Context) {
// resource proxy
@@ -223,7 +223,7 @@ provides extra information to the compiler.
Consider this program:
-``` rust
+``` rust,noplayground
#[rtic::app(device = ..)]
mod app {
struct Resources {
@@ -282,7 +282,7 @@ mod app {
The code generated by the framework looks like this:
-``` rust
+``` rust,noplayground
// omitted: user code
pub mod resources {
@@ -374,7 +374,7 @@ mod app {
At the end the compiler will optimize the function `foo` into something like
this:
-``` rust
+``` rust,noplayground
fn foo(c: foo::Context) {
// NOTE: BASEPRI contains the value `0` (its reset value) at this point
@@ -428,7 +428,7 @@ should not result in an observable change of BASEPRI.
This invariant needs to be preserved to avoid raising the dynamic priority of a
handler through preemption. This is best observed in the following example:
-``` rust
+``` rust,noplayground
#[rtic::app(device = ..)]
mod app {
struct Resources {
@@ -490,7 +490,7 @@ mod app {
IMPORTANT: let's say we *forget* to roll back `BASEPRI` in `UART1` -- this would
be a bug in the RTIC code generator.
-``` rust
+``` rust,noplayground
// code generated by RTIC
mod app {
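To make the `dynamic_priority` formula quoted in this file concrete, here is a small self-contained sketch of the arithmetic (the helper names mirror `hw2logical`/`logical2hw` from the text; `NVIC_PRIO_BITS = 3` is an assumption, and on Cortex-M a numerically lower hardware priority is more urgent):

```rust
// Hedged sketch of the priority arithmetic; this is not code emitted by RTIC.
const NVIC_PRIO_BITS: u8 = 3; // device specific; 8 priority levels assumed

// Hardware priority (stored in the high bits, 0 = most urgent) to the logical
// priority used by RTIC (larger = more urgent).
fn hw2logical(hw: u8) -> u8 {
    (1 << NVIC_PRIO_BITS) - (hw >> (8 - NVIC_PRIO_BITS))
}

// Inverse mapping: the value written into BASEPRI / NVIC priority fields.
fn logical2hw(logical: u8) -> u8 {
    ((1 << NVIC_PRIO_BITS) - logical) << (8 - NVIC_PRIO_BITS)
}

// dynamic_priority = max(hw2logical(BASEPRI), hw2logical(static_priority)),
// except that BASEPRI == 0 (its reset value) masks nothing and contributes nothing.
fn dynamic_priority(basepri: u8, static_priority: u8) -> u8 {
    let from_basepri = if basepri == 0 { 0 } else { hw2logical(basepri) };
    from_basepri.max(hw2logical(static_priority))
}

fn main() {
    // A handler with static logical priority 1 running inside a critical
    // section that raised BASEPRI to logical 2 has dynamic priority 2.
    assert_eq!(dynamic_priority(logical2hw(2), logical2hw(1)), 2);
}
```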

@@ -11,7 +11,7 @@ configuration is done before the `init` function runs.
This example gives you an idea of the code that the RTIC framework runs:
-``` rust
+``` rust,noplayground
#[rtic::app(device = lm3s6965)]
mod app {
#[init]
@@ -33,7 +33,7 @@ mod app {
The framework generates an entry point that looks like this:
-``` rust
+``` rust,noplayground
// the real entry point of the program
#[no_mangle]
unsafe fn main() -> ! {

@@ -8,7 +8,7 @@ interrupts are disabled.
The example below shows the kind of code that the framework generates to
initialize late resources.
-``` rust
+``` rust,noplayground
#[rtic::app(device = ..)]
mod app {
struct Resources {
@@ -39,7 +39,7 @@ mod app {
The code generated by the framework looks like this:
-``` rust
+``` rust,noplayground
fn init(c: init::Context) -> init::LateResources {
// .. user code ..
}

@@ -10,7 +10,7 @@ To reenter a task handler in software its underlying interrupt handler must be
invoked using FFI (see example below). FFI requires `unsafe` code so end users
are discouraged from directly invoking an interrupt handler.
-``` rust
+``` rust,noplayground
#[rtic::app(device = ..)]
mod app {
#[init]
@@ -48,7 +48,7 @@ call from user code.
The above example expands into:
-``` rust
+``` rust,noplayground
fn foo(c: foo::Context) {
// .. user code ..
}

@@ -26,7 +26,7 @@ is treated as a resource contended by the tasks that can `spawn` other tasks.
Let's first take a look at the code generated by the framework to dispatch tasks.
Consider this example:
-``` rust
+``` rust,noplayground
#[rtic::app(device = ..)]
mod app {
// ..
@@ -57,7 +57,7 @@ mod app {
The framework produces the following task dispatcher which consists of an
interrupt handler and a ready queue:
-``` rust
+``` rust,noplayground
fn bar(c: bar::Context) {
// .. user code ..
}
@@ -121,7 +121,7 @@ There's one `Spawn` struct per task.
The `Spawn` code generated by the framework for the previous example looks like
this:
-``` rust
+``` rust,noplayground
mod foo {
// ..
@@ -206,7 +206,7 @@ task capacities.
We have omitted how message passing actually works so let's revisit the `spawn`
implementation but this time for task `baz` which receives a `u64` message.
-``` rust
+``` rust,noplayground
fn baz(c: baz::Context, input: u64) {
// .. user code ..
}
@@ -268,7 +268,7 @@ mod app {
And now let's look at the real implementation of the task dispatcher:
-``` rust
+``` rust,noplayground
mod app {
// ..
@@ -355,7 +355,7 @@ endpoint is owned by a task dispatcher.
Consider the following example:
-``` rust
+``` rust,noplayground
#[rtic::app(device = ..)]
mod app {
#[idle(spawn = [foo, bar])]

@@ -10,7 +10,7 @@ appropriate ready queue.
Let's see how this is implemented in code. Consider the following program:
-``` rust
+``` rust,noplayground
#[rtic::app(device = ..)]
mod app {
// ..
@@ -31,7 +31,7 @@ mod app {
Let's first look at the `schedule` API.
-``` rust
+``` rust,noplayground
mod foo {
pub struct Schedule<'a> {
priority: &'a Cell<u8>,
@@ -122,7 +122,7 @@ is up.
Let's see the associated code.
-``` rust
+``` rust,noplayground
mod app {
#[no_mangle]
fn SysTick() {
@@ -220,7 +220,7 @@ analysis.
To illustrate, consider the following example:
-``` rust
+``` rust,noplayground
#[rtic::app(device = ..)]
mod app {
#[task(priority = 3, spawn = [baz])]
@@ -269,7 +269,7 @@ an `INSTANTS` buffers used to store the time at which a task was scheduled to
run; this `Instant` is read in the task dispatcher and passed to the user code
as part of the task context.
-``` rust
+``` rust,noplayground
mod app {
// ..
@@ -311,7 +311,7 @@ buffer. The value to be written is stored in the `Spawn` struct and its either
the `start` time of the hardware task or the `scheduled` time of the software
task.
-``` rust
+``` rust,noplayground
mod foo {
// ..

@@ -10,7 +10,7 @@ All software tasks are now required to be `async`.
All of the tasks in your project that do not bind to an interrupt must now be `async` functions. For example:
-``` rust
+``` rust,noplayground
#[task(
local = [ some_resource ],
shared = [ my_shared_resource ],
@@ -24,7 +24,7 @@ fn my_task(cx: my_task::Context) {
becomes
-``` rust
+``` rust,noplayground
#[task(
local = [ some_resource ],
shared = [ my_shared_resource ],
@@ -40,7 +40,7 @@ async fn my_task(cx: my_task::Context) {
The new `async` software tasks are allowed to run forever, on one precondition: **there must be an `await` within the infinite loop of the task**. An example of such a task:
-``` rust
+``` rust,noplayground
#[task(local = [ my_channel ] )]
async fn my_task_that_runs_forever(cx: my_task_that_runs_forever::Context) {
loop {

@@ -86,7 +86,7 @@ mod app {
# V2.0.0
-``` rust
+``` rust,noplayground
{{ #include ../../../../examples/stm32f3_blinky/src/main.rs }}
```