From 53ed7bf7edb21180cb18c0bf6a7dbe6168331879 Mon Sep 17 00:00:00 2001 From: Franz Dietrich Date: Thu, 4 Apr 2024 00:01:46 +0200 Subject: [PATCH] fix included examples and markdown(book) (#912) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * fix included examples and markdown(book) fixes: #911 * fix footnote pre_init * more example link updates * Restore pool example name * Example: pool: Upgrade to heapless v0.8 * Example: pool: thumbv6 unsupported: wild cfg-if Experiment with multi-backend example contained in the example * Example: lm3s6965: Updated cargo.lock * Book: Use cargo xtask for by-example * Docs: Contributing: cargo xtask --------- Co-authored-by: Henrik TjΓ€der --- CONTRIBUTING.md | 23 ++-- book/en/archive/by_example/monotonic.md | 4 +- book/en/archive/by_example/tips/from_ram.md | 24 ++--- book/en/src/by-example.md | 89 ++++++++++++--- book/en/src/by-example/app.md | 18 ++-- book/en/src/by-example/app_idle.md | 34 +++--- book/en/src/by-example/app_init.md | 19 ++-- book/en/src/by-example/app_minimal.md | 9 +- book/en/src/by-example/app_priorities.md | 12 +-- book/en/src/by-example/channel.md | 78 +++++++------- book/en/src/by-example/delay.md | 57 +++++----- book/en/src/by-example/hardware_tasks.md | 16 +-- book/en/src/by-example/message_passing.md | 2 +- book/en/src/by-example/resources.md | 81 +++++++------- book/en/src/by-example/software_tasks.md | 101 ++++++++--------- book/en/src/by-example/tips/destructureing.md | 13 +-- book/en/src/by-example/tips/indirection.md | 12 +-- .../src/by-example/tips/static_lifetimes.md | 12 +-- examples/lm3s6965/Cargo.lock | 3 +- examples/lm3s6965/Cargo.toml | 1 + examples/lm3s6965/examples/pool.rs | 102 ++++++++++++++++++ examples/lm3s6965/examples/pool.rs_old | 69 ------------ 22 files changed, 437 insertions(+), 342 deletions(-) create mode 100644 examples/lm3s6965/examples/pool.rs delete mode 100644 examples/lm3s6965/examples/pool.rs_old diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 9c6a861e94..a027e26a83 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -36,30 +36,23 @@ Please make sure that tests passes locally before submitting. ### Example check ```shell -> cargo check --examples --target thumbv7m-none-eabi +> cargo xtask example-check ``` -and/or +### Run examples/tests on QEMU device ```shell -> cargo check --examples --target thumbv6m-none-eabi +> cargo xtask qemu ``` -### Run tests with xtask +Will execute examples on your local `qemu` install. -```shell -> cargo xtask --target all -``` - -Will execute `run` tests on your local `qemu` install. -(You may also pass a single target `--target thumbv6m-none-eabi/thumbv7m-none-eabi` during development). - -#### Adding tests to xtask +#### Adding examples/tests to xtask If you have added further tests, you need to add the expected output in the `ci/expected` folder. 
```shell -> cargo run --example --target thumbv7m-none-eabi > ci/expected/.run +> cargo xtask qemu --overwrite-expected ``` ### Internal tests @@ -67,7 +60,7 @@ If you have added further tests, you need to add the expected output in the `ci/ Run internal fail tests locally with: ```shell -> cargo test --tests +> cargo xtask test ``` #### Adding tests to internal tests @@ -76,7 +69,7 @@ If you have added fail tests or changed the expected behavior, the expected outp Inspect the error output, when sure that `ACTUAL OUTPUT` is correct you can re-run the test as: ```shell -> TRYBUILD=overwrite cargo test --tests +> TRYBUILD=overwrite cargo xtask test ``` This will update the expected output to match the `ACTUAL OUTPUT`. diff --git a/book/en/archive/by_example/monotonic.md b/book/en/archive/by_example/monotonic.md index 0ed4340d9c..14084d5430 100644 --- a/book/en/archive/by_example/monotonic.md +++ b/book/en/archive/by_example/monotonic.md @@ -39,7 +39,7 @@ See the following example: ``` ``` console -$ cargo run --target thumbv7m-none-eabi --example schedule +$ cargo xtask qemu --verbose --example schedule {{#include ../../../../ci/expected/schedule.run}} ``` @@ -59,6 +59,6 @@ too late and that the task is already sent for execution. The following example ``` ``` console -$ cargo run --target thumbv7m-none-eabi --example cancel-reschedule +$ cargo xtask qemu --verbose --example cancel-reschedule {{#include ../../../../ci/expected/cancel-reschedule.run}} ``` diff --git a/book/en/archive/by_example/tips/from_ram.md b/book/en/archive/by_example/tips/from_ram.md index a153139e5a..47b3cbf8bb 100644 --- a/book/en/archive/by_example/tips/from_ram.md +++ b/book/en/archive/by_example/tips/from_ram.md @@ -11,35 +11,35 @@ improve performance in some cases. The example below shows how to place the higher priority task, `bar`, in RAM. -``` rust,noplayground -{{#include ../../../../../rtic/examples/ramfunc.rs}} +```rust,noplayground +{{#include ../../../../../examples/lm3s6965/examples/ramfunc.rs}} ``` Running this program produces the expected output. -``` console -$ cargo run --target thumbv7m-none-eabi --example ramfunc +```console +$ cargo xtask qemu --verbose --example ramfunc ``` -``` console -{{#include ../../../../../rtic/ci/expected/ramfunc.run}} +```console +{{#include ../../../../../ci/expected/lm3s6965/ramfunc.run}} ``` One can look at the output of `cargo-nm` to confirm that `bar` ended in RAM (`0x2000_0000`), whereas `foo` ended in Flash (`0x0000_0000`). -``` console +```console $ cargo nm --example ramfunc --release | grep ' foo::' ``` -``` console -{{#include ../../../../../rtic/ci/expected/ramfunc.run.grep.foo}} +```console +{{#include ../../../../../ci/expected/lm3s6965/ramfunc.run.grep.foo}} ``` -``` console +```console $ cargo nm --example ramfunc --target thumbv7m-none-eabi --release | grep '*bar::' ``` -``` console -{{#include ../../../../../rtic/ci/expected/ramfunc.run.grep.bar}} +```console +{{#include ../../../../../ci/expected/lm3s6965/ramfunc.run.grep.bar}} ``` diff --git a/book/en/src/by-example.md b/book/en/src/by-example.md index e5cf67a0ec..4d3f537024 100644 --- a/book/en/src/by-example.md +++ b/book/en/src/by-example.md @@ -2,8 +2,8 @@ This part of the book introduces the RTIC framework to new users by walking them through examples of increasing complexity. -All examples in this part of the book are accessible at the -[GitHub repository][repoexamples]. +All examples in this part of the book are part of the +[RTIC repository][repoexamples], found in the `examples` directory. 
The examples are runnable on QEMU (emulating a Cortex M3 target), thus no special hardware required to follow along. @@ -17,24 +17,85 @@ embedded development environment that includes QEMU. [the embedded Rust book]: https://rust-embedded.github.io/book/intro/install.html -To run the examples found in `examples/` locally, cargo needs a supported `target` and -either `--examples` (run all examples) or `--example NAME` to run a specific example. +To run the examples found in `examples/` locally using QEMU: + +``` +cargo xtask qemu +``` + +This runs all of the examples against the default `thumbv7m-none-eabi` device `lm3s6965`. + +To limit which examples are being run, use the flag `--example `, the name being the filename of the example. Assuming dependencies in place, running: -``` console -$ cargo run --target thumbv7m-none-eabi --example locals +```console +$ cargo xtask qemu --example locals ``` Yields this output: -``` console -{{#include ../../../rtic/ci/expected/locals.run}} +```console + Finished dev [unoptimized + debuginfo] target(s) in 0.07s + Running `target/debug/xtask qemu --example locals` +INFO xtask > Testing for platform: Lm3s6965, backend: Thumbv7 +INFO xtask::run > πŸ‘Ÿ Build example locals (thumbv7m-none-eabi, release, "test-critical-section,thumbv7-backend", in examples/lm3s6965) +INFO xtask::run > βœ… Success. +INFO xtask::run > πŸ‘Ÿ Run example locals in QEMU (thumbv7m-none-eabi, release, "test-critical-section,thumbv7-backend", in examples/lm3s6965) +INFO xtask::run > βœ… Success. +INFO xtask::results > βœ… Success: Build example locals (thumbv7m-none-eabi, release, "test-critical-section,thumbv7-backend", in examples/lm3s6965) +INFO xtask::results > βœ… Success: Run example locals in QEMU (thumbv7m-none-eabi, release, "test-critical-section,thumbv7-backend", in examples/lm3s6965) +INFO xtask::results > πŸš€πŸš€πŸš€ All tasks succeeded πŸš€πŸš€πŸš€ ``` -> **NOTE**: You can choose target device by passing a target -> triple to cargo (e.g. `cargo run --example init --target thumbv7m-none-eabi`) or -> configure a default target in `.cargo/config.toml`. -> -> For running the examples, we (typically) use a Cortex M3 emulated in QEMU, so the target is `thumbv7m-none-eabi`. -> Since the M3 architecture is backwards compatible to the M0/M0+ architecture, you may also use the `thumbv6m-none-eabi`, in case you want to inspect generated assembly code for the M0/M0+ architecture. +It is great that examples are passing and this is part of the RTIC CI setup too, but for the purposes of this book we must add the `--verbose` flag, or `-v` for short to see the actual program output: + +```console +❯ cargo xtask qemu --verbose --example locals + Finished dev [unoptimized + debuginfo] target(s) in 0.03s + Running `target/debug/xtask qemu --example locals --verbose` + DEBUG xtask > Stderr of child processes is inherited: false + DEBUG xtask > Partial features: false + INFO xtask > Testing for platform: Lm3s6965, backend: Thumbv7 + INFO xtask::run > πŸ‘Ÿ Build example locals (thumbv7m-none-eabi, release, "test-critical-section,thumbv7-backend", in examples/lm3s6965) + INFO xtask::run > βœ… Success. + INFO xtask::run > πŸ‘Ÿ Run example locals in QEMU (thumbv7m-none-eabi, release, "test-critical-section,thumbv7-backend", in examples/lm3s6965) + INFO xtask::run > βœ… Success. 
+ INFO xtask::results > βœ… Success: Build example locals (thumbv7m-none-eabi, release, "test-critical-section,thumbv7-backend", in examples/lm3s6965) + cd examples/lm3s6965 && cargo build --target thumbv7m-none-eabi --features test-critical-section,thumbv7-backend --release --example locals + DEBUG xtask::results > +cd examples/lm3s6965 && cargo build --target thumbv7m-none-eabi --features test-critical-section,thumbv7-backend --release --example locals +Stderr: + Finished release [optimized] target(s) in 0.02s + INFO xtask::results > βœ… Success: Run example locals in QEMU (thumbv7m-none-eabi, release, "test-critical-section,thumbv7-backend", in examples/lm3s6965) + cd examples/lm3s6965 && cargo run --target thumbv7m-none-eabi --features test-critical-section,thumbv7-backend --release --example locals + DEBUG xtask::results > +cd examples/lm3s6965 && cargo run --target thumbv7m-none-eabi --features test-critical-section,thumbv7-backend --release --example locals +Stdout: +bar: local_to_bar = 1 +foo: local_to_foo = 1 +idle: local_to_idle = 1 + +Stderr: + Finished release [optimized] target(s) in 0.02s + Running `qemu-system-arm -cpu cortex-m3 -machine lm3s6965evb -nographic -semihosting-config enable=on,target=native -kernel target/thumbv7m-none-eabi/release/examples/locals` +Timer with period zero, disabling + + INFO xtask::results > πŸš€πŸš€πŸš€ All tasks succeeded πŸš€πŸš€πŸš€ +``` + +Look for the content following `Stdout:` towards the end ouf the output, the program output should have these lines: + +```console +{{#include ../../../ci/expected/lm3s6965/locals.run}} +``` + +> **NOTE**: +> For other useful options to `cargo xtask`, see: +> ``` +> cargo xtask qemu --help +> ``` +> +> The `--platform` flag allows changing which device examples are run on, +> currently `lm3s6965` is the best supported, work is ongoing to +> increase support for other devices, including both ARM and RISC-V diff --git a/book/en/src/by-example/app.md b/book/en/src/by-example/app.md index 6cdd92a16c..2fd787f762 100644 --- a/book/en/src/by-example/app.md +++ b/book/en/src/by-example/app.md @@ -4,7 +4,7 @@ All RTIC applications use the [`app`] attribute (`#[app(..)]`). This attribute only applies to a `mod`-item containing the RTIC application. -The `app` attribute has a mandatory `device` argument that takes a *path* as a value. This must be a full path pointing to a *peripheral access crate* (PAC) generated using [`svd2rust`] **v0.14.x** or newer. +The `app` attribute has a mandatory `device` argument that takes a _path_ as a value. This must be a full path pointing to a _peripheral access crate_ (PAC) generated using [`svd2rust`] **v0.14.x** or newer. The `app` attribute will expand into a suitable entry point and thus replaces the use of the [`cortex_m_rt::entry`] attribute. @@ -14,13 +14,13 @@ The `app` attribute will expand into a suitable entry point and thus replaces th ## Structure and zero-cost concurrency -An RTIC `app` is an executable system model for single-core applications, declaring a set of `local` and `shared` resources operated on by a set of `init`, `idle`, *hardware* and *software* tasks. +An RTIC `app` is an executable system model for single-core applications, declaring a set of `local` and `shared` resources operated on by a set of `init`, `idle`, _hardware_ and _software_ tasks. -* `init` runs before any other task, and returns the `local` and `shared` resources. -* Tasks (both hardware and software) run preemptively based on their associated static priority. 
-* Hardware tasks are bound to underlying hardware interrupts. -* Software tasks are schedulied by an set of asynchronous executors, one for each software task priority. -* `idle` has the lowest priority, and can be used for background work, and/or to put the system to sleep until it is woken by some event. +- `init` runs before any other task, and returns the `local` and `shared` resources. +- Tasks (both hardware and software) run preemptively based on their associated static priority. +- Hardware tasks are bound to underlying hardware interrupts. +- Software tasks are schedulied by an set of asynchronous executors, one for each software task priority. +- `idle` has the lowest priority, and can be used for background work, and/or to put the system to sleep until it is woken by some event. At compile time the task/resource model is analyzed under the Stack Resource Policy (SRP) and executable code generated with the following outstanding properties: @@ -41,6 +41,6 @@ Priorities in RTIC follow a higher value = more important scheme. For examples, To give a taste of RTIC, the following example contains commonly used features. In the following sections we will go through each feature in detail. -``` rust,noplayground -{{#include ../../../../rtic/examples/common.rs}} +```rust,noplayground +{{#include ../../../../examples/lm3s6965/examples/common.rs}} ``` diff --git a/book/en/src/by-example/app_idle.md b/book/en/src/by-example/app_idle.md index c0b4139c26..ea193a2a5e 100644 --- a/book/en/src/by-example/app_idle.md +++ b/book/en/src/by-example/app_idle.md @@ -1,8 +1,8 @@ # The background task `#[idle]` -A function marked with the `idle` attribute can optionally appear in the module. This becomes the special *idle task* and must have signature `fn(idle::Context) -> !`. +A function marked with the `idle` attribute can optionally appear in the module. This becomes the special _idle task_ and must have signature `fn(idle::Context) -> !`. -When present, the runtime will execute the `idle` task after `init`. Unlike `init`, `idle` will run *with interrupts enabled* and must never return, as the `-> !` function signature indicates. +When present, the runtime will execute the `idle` task after `init`. Unlike `init`, `idle` will run _with interrupts enabled_ and must never return, as the `-> !` function signature indicates. [The Rust type `!` means β€œnever”][nevertype]. [nevertype]: https://doc.rust-lang.org/core/primitive.never.html @@ -11,25 +11,25 @@ Like in `init`, locally declared resources will have `'static` lifetimes that ar The example below shows that `idle` runs after `init`. -``` rust,noplayground -{{#include ../../../../rtic/examples/idle.rs}} +```rust,noplayground +{{#include ../../../../examples/lm3s6965/examples/idle.rs}} ``` -``` console -$ cargo run --target thumbv7m-none-eabi --example idle +```console +$ cargo xtask qemu --verbose --example idle ``` -``` console -{{#include ../../../../rtic/ci/expected/idle.run}} +```console +{{#include ../../../../ci/expected/lm3s6965/idle.run}} ``` By default, the RTIC `idle` task does not try to optimize for any specific targets. A common useful optimization is to enable the [SLEEPONEXIT] and allow the MCU to enter sleep when reaching `idle`. ->**Caution**: some hardware unless configured disables the debug unit during sleep mode. +> **Caution**: some hardware unless configured disables the debug unit during sleep mode. > ->Consult your hardware specific documentation as this is outside the scope of RTIC. 
+> Consult your hardware specific documentation as this is outside the scope of RTIC. The following example shows how to enable sleep by setting the [`SLEEPONEXIT`][SLEEPONEXIT] and providing a custom `idle` task replacing the default [`nop()`][NOP] with [`wfi()`][WFI]. @@ -38,16 +38,16 @@ The following example shows how to enable sleep by setting the [WFI]: https://developer.arm.com/documentation/dui0662/b/The-Cortex-M0--Instruction-Set/Miscellaneous-instructions/WFI [NOP]: https://developer.arm.com/documentation/dui0662/b/The-Cortex-M0--Instruction-Set/Miscellaneous-instructions/NOP -``` rust,noplayground -{{#include ../../../../rtic/examples/idle-wfi.rs}} +```rust,noplayground +{{#include ../../../../examples/lm3s6965/examples/idle-wfi.rs}} ``` -``` console -$ cargo run --target thumbv7m-none-eabi --example idle-wfi +```console +$ cargo xtask qemu --verbose --example idle-wfi ``` -``` console -{{#include ../../../../rtic/ci/expected/idle-wfi.run}} +```console +{{#include ../../../../ci/expected/lm3s6965/idle-wfi.run}} ``` -> **Notice**: The `idle` task cannot be used together with *software* tasks running at priority zero. The reason is that `idle` is running as a non-returning Rust function at priority zero. Thus there would be no way for an executor at priority zero to give control to *software* tasks at the same priority. +> **Notice**: The `idle` task cannot be used together with _software_ tasks running at priority zero. The reason is that `idle` is running as a non-returning Rust function at priority zero. Thus there would be no way for an executor at priority zero to give control to _software_ tasks at the same priority. diff --git a/book/en/src/by-example/app_init.md b/book/en/src/by-example/app_init.md index 52d657a0c6..49f0bc39f8 100644 --- a/book/en/src/by-example/app_init.md +++ b/book/en/src/by-example/app_init.md @@ -3,13 +3,14 @@ An RTIC application requires an `init` task setting up the system. The corresponding `init` function must have the signature `fn(init::Context) -> (Shared, Local)`, where `Shared` and `Local` are resource structures defined by the user. -The `init` task executes after system reset, [after an optionally defined `pre-init` code section][pre-init] and an always occurring internal RTIC initialization. -[pre-init]: https://docs.rs/cortex-m-rt/latest/cortex_m_rt/attr.pre_init.html +The `init` task executes after system reset, [after an optionally defined `pre-init` code section][^pre-init] and an always occurring internal RTIC initialization. -The `init` and optional `pre-init` tasks runs *with interrupts disabled* and have exclusive access to Cortex-M (the `bare_metal::CriticalSection` token is available as `cs`). +The `init` and optional `pre-init` tasks runs _with interrupts disabled_ and have exclusive access to Cortex-M (the `bare_metal::CriticalSection` token is available as `cs`). Device specific peripherals are available through the `core` and `device` fields of `init::Context`. +[^pre-init]: [https://docs.rs/cortex-m-rt/latest/cortex_m_rt/attr.pre_init.html](https://docs.rs/cortex-m-rt/latest/cortex_m_rt/attr.pre_init.html) + ## Example The example below shows the types of the `core`, `device` and `cs` fields, and showcases the use of a `local` variable with `'static` lifetime. Such variables can be delegated from the `init` task to other tasks of the RTIC application. 
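As a quick orientation before the complete `init.rs` listing referenced below, a condensed sketch of such an `init` task (the empty `Shared`/`Local` structs and the local `x` are illustrative assumptions):

```rust,noplayground
#[init(local = [x: u32 = 0])]
fn init(cx: init::Context) -> (Shared, Local) {
    let _core = cx.core;     // Cortex-M core peripherals
    let _device = cx.device; // device peripherals (present because `peripherals = true` by default)
    let _cs = cx.cs;         // `bare_metal::CriticalSection` token, valid for the whole of `init`
    let _x: &'static mut u32 = cx.local.x; // `init` locals have 'static lifetime

    // The returned structs seed the system-wide `#[shared]` and `#[local]` resources
    (Shared {}, Local {})
}
```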
@@ -17,16 +18,16 @@ The example below shows the types of the `core`, `device` and `cs` fields, and s The `device` field is only available when the `peripherals` argument is set to the default value `true`. In the rare case you want to implement an ultra-slim application you can explicitly set `peripherals` to `false`. -``` rust,noplayground -{{#include ../../../../rtic/examples/init.rs}} +```rust,noplayground +{{#include ../../../../examples/lm3s6965/examples/init.rs}} ``` Running the example will print `init` to the console and then exit the QEMU process. -``` console -$ cargo run --target thumbv7m-none-eabi --example init +```console +$ cargo xtask qemu --verbose --example init ``` -``` console -{{#include ../../../../rtic/ci/expected/init.run}} +```console +{{#include ../../../../ci/expected/lm3s6965/init.run}} ``` diff --git a/book/en/src/by-example/app_minimal.md b/book/en/src/by-example/app_minimal.md index 2c6f21887b..1712660462 100644 --- a/book/en/src/by-example/app_minimal.md +++ b/book/en/src/by-example/app_minimal.md @@ -2,18 +2,19 @@ This is the smallest possible RTIC application: -``` rust,noplayground -{{#include ../../../../rtic/examples/smallest.rs}} +```rust,noplayground +{{#include ../../../../examples/lm3s6965/examples/smallest.rs}} ``` RTIC is designed with resource efficiency in mind. RTIC itself does not rely on any dynamic memory allocation, thus RAM requirement is dependent only on the application. The flash memory footprint is below 1kB including the interrupt vector table. For a minimal example you can expect something like: -``` console + +```console $ cargo size --example smallest --target thumbv7m-none-eabi --release ``` -``` console +```console Finished release [optimized] target(s) in 0.07s text data bss dec hex filename 924 0 0 924 39c smallest diff --git a/book/en/src/by-example/app_priorities.md b/book/en/src/by-example/app_priorities.md index 47032917cc..ca3d957c68 100644 --- a/book/en/src/by-example/app_priorities.md +++ b/book/en/src/by-example/app_priorities.md @@ -33,16 +33,16 @@ Task Priority The following example showcases the priority based scheduling of tasks: -``` rust,noplayground -{{#include ../../../../rtic/examples/preempt.rs}} +```rust,noplayground +{{#include ../../../../examples/lm3s6965/examples/preempt.rs}} ``` -``` console -$ cargo run --target thumbv7m-none-eabi --example preempt -{{#include ../../../../rtic/ci/expected/preempt.run}} +```console +$ cargo xtask qemu --verbose --example preempt +{{#include ../../../../ci/expected/lm3s6965/preempt.run}} ``` -Note that the task `bar` does *not* preempt task `baz` because its priority is the *same* as `baz`'s. The higher priority task `bar` runs before `foo` when `baz`returns. When `bar` returns `foo` can resume. +Note that the task `bar` does _not_ preempt task `baz` because its priority is the _same_ as `baz`'s. The higher priority task `bar` runs before `foo` when `baz`returns. When `bar` returns `foo` can resume. One more note about priorities: choosing a priority higher than what the device supports will result in a compilation error. The error is cryptic due to limitations in the Rust language, if `priority = 9` for task `uart0_interrupt` in `example/common.rs` this looks like: diff --git a/book/en/src/by-example/channel.md b/book/en/src/by-example/channel.md index 75ecbfd938..4bd5b76d59 100644 --- a/book/en/src/by-example/channel.md +++ b/book/en/src/by-example/channel.md @@ -1,8 +1,8 @@ # Communication over channels. -Channels can be used to communicate data between running tasks. 
The channel is essentially a wait queue, allowing tasks with multiple producers and a single receiver. A channel is constructed in the `init` task and backed by statically allocated memory. Send and receive endpoints are distributed to *software* tasks: +Channels can be used to communicate data between running tasks. The channel is essentially a wait queue, allowing tasks with multiple producers and a single receiver. A channel is constructed in the `init` task and backed by statically allocated memory. Send and receive endpoints are distributed to _software_ tasks: -``` rust,noplayground +```rust,noplayground ... const CAPACITY: usize = 5; #[init] @@ -14,15 +14,15 @@ const CAPACITY: usize = 5; ... ``` -In this case the channel holds data of `u32` type with a capacity of 5 elements. +In this case the channel holds data of `u32` type with a capacity of 5 elements. -Channels can also be used from *hardware* tasks, but only in a non-`async` manner using the [Try API](#try-api). +Channels can also be used from _hardware_ tasks, but only in a non-`async` manner using the [Try API](#try-api). ## Sending data The `send` method post a message on the channel as shown below: -``` rust,noplayground +```rust,noplayground #[task] async fn sender1(_c: sender1::Context, mut sender: Sender<'static, u32, CAPACITY>) { hprintln!("Sender 1 sending: 1"); @@ -34,7 +34,7 @@ async fn sender1(_c: sender1::Context, mut sender: Sender<'static, u32, CAPACITY The receiver can `await` incoming messages: -``` rust,noplayground +```rust,noplayground #[task] async fn receiver(_c: receiver::Context, mut receiver: Receiver<'static, u32, CAPACITY>) { while let Ok(val) = receiver.recv().await { @@ -44,69 +44,69 @@ async fn receiver(_c: receiver::Context, mut receiver: Receiver<'static, u32, CA } ``` -Channels are implemented using a small (global) *Critical Section* (CS) for protection against race-conditions. The user must provide an CS implementation. Compiling the examples given the `--features test-critical-section` gives one possible implementation. +Channels are implemented using a small (global) _Critical Section_ (CS) for protection against race-conditions. The user must provide an CS implementation. Compiling the examples given the `--features test-critical-section` gives one possible implementation. For a complete example: -``` rust,noplayground -{{#include ../../../../rtic/examples/async-channel.rs}} +```rust,noplayground +{{#include ../../../../examples/lm3s6965/examples/async-channel.rs}} ``` -``` console -$ cargo run --target thumbv7m-none-eabi --example async-channel --features test-critical-section +```console +$ cargo xtask qemu --verbose --example async-channel --features test-critical-section ``` -``` console -{{#include ../../../../rtic/ci/expected/async-channel.run}} +```console +{{#include ../../../../ci/expected/lm3s6965/async-channel.run}} ``` Also sender endpoint can be awaited. In case the channel capacity has not yet been reached, `await`-ing the sender can progress immediately, while in the case the capacity is reached, the sender is blocked until there is free space in the queue. In this way data is never lost. In the following example the `CAPACITY` has been reduced to 1, forcing sender tasks to wait until the data in the channel has been received. 
-``` rust,noplayground -{{#include ../../../../rtic/examples/async-channel-done.rs}} +```rust,noplayground +{{#include ../../../../examples/lm3s6965/examples/async-channel-done.rs}} ``` -Looking at the output, we find that `Sender 2` will wait until the data sent by `Sender 1` as been received. +Looking at the output, we find that `Sender 2` will wait until the data sent by `Sender 1` as been received. -> **NOTICE** *Software* tasks at the same priority are executed asynchronously to each other, thus **NO** strict order can be assumed. (The presented order here applies only to the current implementation, and may change between RTIC framework releases.) +> **NOTICE** _Software_ tasks at the same priority are executed asynchronously to each other, thus **NO** strict order can be assumed. (The presented order here applies only to the current implementation, and may change between RTIC framework releases.) -``` console -$ cargo run --target thumbv7m-none-eabi --example async-channel-done --features test-critical-section -{{#include ../../../../rtic/ci/expected/async-channel-done.run}} +```console +$ cargo xtask qemu --verbose --example async-channel-done --features test-critical-section +{{#include ../../../../ci/expected/lm3s6965/async-channel-done.run}} ``` ## Error handling In case all senders have been dropped `await`-ing on an empty receiver channel results in an error. This allows to gracefully implement different types of shutdown operations. -``` rust,noplayground -{{#include ../../../../rtic/examples/async-channel-no-sender.rs}} +```rust,noplayground +{{#include ../../../../examples/lm3s6965/examples/async-channel-no-sender.rs}} ``` -``` console -$ cargo run --target thumbv7m-none-eabi --example async-channel-no-sender --features test-critical-section +```console +$ cargo xtask qemu --verbose --example async-channel-no-sender --features test-critical-section ``` -``` console -{{#include ../../../../rtic/ci/expected/async-channel-no-sender.run}} +```console +{{#include ../../../../ci/expected/lm3s6965/async-channel-no-sender.run}} ``` Similarly, `await`-ing on a send channel results in an error in case the receiver has been dropped. This allows to gracefully implement application level error handling. The resulting error returns the data back to the sender, allowing the sender to take appropriate action (e.g., storing the data to later retry sending it). -``` rust,noplayground -{{#include ../../../../rtic/examples/async-channel-no-receiver.rs}} +```rust,noplayground +{{#include ../../../../examples/lm3s6965/examples/async-channel-no-receiver.rs}} ``` -``` console -$ cargo run --target thumbv7m-none-eabi --example async-channel-no-receiver --features test-critical-section +```console +$ cargo xtask qemu --verbose --example async-channel-no-receiver --features test-critical-section ``` -``` console -{{#include ../../../../rtic/ci/expected/async-channel-no-receiver.run}} +```console +{{#include ../../../../ci/expected/lm3s6965/async-channel-no-receiver.run}} ``` ## Try API @@ -115,14 +115,14 @@ Using the Try API, you can send or receive data from or to a channel without req This API is exposed through `Receiver::try_recv` and `Sender::try_send`. 
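For instance, a hardware task (which cannot `await`) can feed a channel through the Try API. A minimal sketch, in which the `UART0` binding and the `sender` local resource are illustrative assumptions:

```rust,noplayground
#[task(binds = UART0, local = [sender])]
fn uart0(cx: uart0::Context) {
    // A hardware task cannot `await`, so it uses the non-blocking Try API.
    // On failure (queue full or receiver dropped) the value is handed back
    // inside the error and can be dropped or handled by the application.
    if cx.local.sender.try_send(42u32).is_err() {
        // queue full or no receiver -- drop the sample
    }
}
```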
-``` rust,noplayground -{{#include ../../../../rtic/examples/async-channel-try.rs}} +```rust,noplayground +{{#include ../../../../examples/lm3s6965/examples/async-channel-try.rs}} ``` -``` console -$ cargo run --target thumbv7m-none-eabi --example async-channel-try --features test-critical-section +```console +$ cargo xtask qemu --verbose --example async-channel-try --features test-critical-section ``` -``` console -{{#include ../../../../rtic/ci/expected/async-channel-try.run}} -``` \ No newline at end of file +```console +{{#include ../../../../ci/expected/lm3s6965/async-channel-try.run}} +``` diff --git a/book/en/src/by-example/delay.md b/book/en/src/by-example/delay.md index facd595cf4..b99a4d17f8 100644 --- a/book/en/src/by-example/delay.md +++ b/book/en/src/by-example/delay.md @@ -1,6 +1,6 @@ # Tasks with delay -A convenient way to express miniminal timing requirements is by delaying progression. +A convenient way to express miniminal timing requirements is by delaying progression. This can be achieved by instantiating a monotonic timer (for implementations, see [`rtic-monotonics`]): @@ -9,15 +9,15 @@ This can be achieved by instantiating a monotonic timer (for implementations, se [`Monotonic`]: https://docs.rs/rtic-time/latest/rtic_time/trait.Monotonic.html [Implementing a `Monotonic`]: ../monotonic_impl.md -``` rust,noplayground +```rust,noplayground ... -{{#include ../../../../rtic/examples/async-timeout.rs:init}} +{{#include ../../../../examples/lm3s6965/examples/async-timeout.rs:init}} ... ``` -A *software* task can `await` the delay to expire: +A _software_ task can `await` the delay to expire: -``` rust,noplayground +```rust,noplayground #[task] async fn foo(_cx: foo::Context) { ... @@ -30,16 +30,16 @@ async fn foo(_cx: foo::Context) {
A complete example -``` rust,noplayground -{{#include ../../../../rtic/examples/async-delay.rs}} +```rust,noplayground +{{#include ../../../../examples/lm3s6965/examples/async-delay.rs}} ``` -``` console -$ cargo run --target thumbv7m-none-eabi --example async-delay --features test-critical-section +```console +$ cargo xtask qemu --verbose --example async-delay --features test-critical-section ``` -``` console -{{#include ../../../../rtic/ci/expected/async-delay.run}} +```console +{{#include ../../../../ci/expected/lm3s6965/async-delay.run}} ```
@@ -53,54 +53,55 @@ Rust [`Future`]s (underlying Rust `async`/`await`) are composable. This makes it [`Future`]: https://doc.rust-lang.org/std/future/trait.Future.html -A common use case is transactions with an associated timeout. In the examples shown below, we introduce a fake HAL device that performs some transaction. We have modelled the time it takes based on the input parameter (`n`) as `350ms + n * 100ms`. +A common use case is transactions with an associated timeout. In the examples shown below, we introduce a fake HAL device that performs some transaction. We have modelled the time it takes based on the input parameter (`n`) as `350ms + n * 100ms`. Using the `select_biased` macro from the `futures` crate it may look like this: -``` rust,noplayground,noplayground -{{#include ../../../../rtic/examples/async-timeout.rs:select_biased}} +```rust,noplayground,noplayground +{{#include ../../../../examples/lm3s6965/examples/async-timeout.rs:select_biased}} ``` Assuming the `hal_get` will take 450ms to finish, a short timeout of 200ms will expire before `hal_get` can complete. Extending the timeout to 1000ms would cause `hal_get` will to complete first. -Using `select_biased` any number of futures can be combined, so its very powerful. However, as the timeout pattern is frequently used, more ergonomic support is baked into RTIC, provided by the [`rtic-monotonics`] and [`rtic-time`] crates. +Using `select_biased` any number of futures can be combined, so its very powerful. However, as the timeout pattern is frequently used, more ergonomic support is baked into RTIC, provided by the [`rtic-monotonics`] and [`rtic-time`] crates. Rewriting the second example from above using `timeout_after` gives: -``` rust,noplayground -{{#include ../../../../rtic/examples/async-timeout.rs:timeout_at_basic}} +```rust,noplayground +{{#include ../../../../examples/lm3s6965/examples/async-timeout.rs:timeout_at_basic}} ``` In cases where you want exact control over time without drift we can use exact points in time using `Instant`, and spans of time using `Duration`. Operations on the `Instant` and `Duration` types come from the [`fugit`] crate. [fugit]: https://crates.io/crates/fugit -`let mut instant = Systick::now()` sets the starting time of execution. +`let mut instant = Systick::now()` sets the starting time of execution. -We want to call `hal_get` after 1000ms relative to this starting time. This can be accomplished by using `Systick::delay_until(instant).await`. +We want to call `hal_get` after 1000ms relative to this starting time. This can be accomplished by using `Systick::delay_until(instant).await`. -Then, we define a point in time called `timeout`, and call `Systick::timeout_at(timeout, hal_get(n)).await`. +Then, we define a point in time called `timeout`, and call `Systick::timeout_at(timeout, hal_get(n)).await`. -For the first iteration of the loop, with `n == 0`, the `hal_get` will take 350ms (and finishes before the timeout). +For the first iteration of the loop, with `n == 0`, the `hal_get` will take 350ms (and finishes before the timeout). -For the second iteration, with `n == 1`, the `hal_get` will take 450ms (and again succeeds to finish before the timeout). +For the second iteration, with `n == 1`, the `hal_get` will take 450ms (and again succeeds to finish before the timeout). For the third iteration, with `n == 2`, `hal_get` will take 550ms to finish, in which case we will run into a timeout.
A complete example -``` rust,noplayground -{{#include ../../../../rtic/examples/async-timeout.rs}} +```rust,noplayground +{{#include ../../../../examples/lm3s6965/examples/async-timeout.rs}} ``` -``` console -$ cargo run --target thumbv7m-none-eabi --example async-timeout --features test-critical-section +```console +$ cargo xtask qemu --verbose --example async-timeout --features test-critical-section ``` -``` console -{{#include ../../../../rtic/ci/expected/async-timeout.run}} +```console +{{#include ../../../../ci/expected/lm3s6965/async-timeout.run}} ``` +
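Before moving on, the timeout pattern described in this chapter can be condensed into a single sketch. It assumes the `Systick` monotonic, `cortex_m_semihosting::hprintln` and the chapter's fake `hal_get`; the task name, the concrete durations and the `.millis()` extension trait from `fugit` are illustrative:

```rust,noplayground
#[task]
async fn periodic(_cx: periodic::Context) {
    let mut n = 0;
    let mut instant = Systick::now();
    loop {
        // Drift-free period: advance an absolute `Instant` and sleep until it.
        instant += 1000.millis();
        Systick::delay_until(instant).await;

        // Bound the transaction by an absolute deadline.
        let deadline = instant + 500.millis();
        match Systick::timeout_at(deadline, hal_get(n)).await {
            Ok(_) => hprintln!("hal_get({}) finished in time", n),
            Err(_) => hprintln!("hal_get({}) timed out", n),
        }

        n += 1;
    }
}
```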
diff --git a/book/en/src/by-example/hardware_tasks.md b/book/en/src/by-example/hardware_tasks.md index ded488c421..4cd30b7c5e 100644 --- a/book/en/src/by-example/hardware_tasks.md +++ b/book/en/src/by-example/hardware_tasks.md @@ -4,11 +4,11 @@ At its core RTIC is using a hardware interrupt controller ([ARM NVIC on cortex-m To bind a task to an interrupt, use the `#[task]` attribute argument `binds = InterruptName`. This task then becomes the interrupt handler for this hardware interrupt vector. -All tasks bound to an explicit interrupt are called *hardware tasks* since they start execution in reaction to a hardware event. +All tasks bound to an explicit interrupt are called _hardware tasks_ since they start execution in reaction to a hardware event. Specifying a non-existing interrupt name will cause a compilation error. The interrupt names are commonly defined by [PAC or HAL][pacorhal] crates. -Any available interrupt vector should work. Specific devices may bind specific interrupt priorities to specific interrupt vectors outside user code control. See for example the [nRF β€œsoftdevice”](https://github.com/rtic-rs/rtic/issues/434). +Any available interrupt vector should work. Specific devices may bind specific interrupt priorities to specific interrupt vectors outside user code control. See for example the [nRF β€œsoftdevice”](https://github.com/rtic-rs/rtic/issues/434). Beware of using interrupt vectors that are used internally by hardware features; RTIC is unaware of such hardware specific details. @@ -19,14 +19,14 @@ Beware of using interrupt vectors that are used internally by hardware features; The example below demonstrates the use of the `#[task(binds = InterruptName)]` attribute to declare a hardware task bound to an interrupt handler. -``` rust,noplayground -{{#include ../../../../rtic/examples/hardware.rs}} +```rust,noplayground +{{#include ../../../../examples/lm3s6965/examples/hardware.rs}} ``` -``` console -$ cargo run --target thumbv7m-none-eabi --example hardware +```console +$ cargo xtask qemu --verbose --example hardware ``` -``` console -{{#include ../../../../rtic/ci/expected/hardware.run}} +```console +{{#include ../../../../ci/expected/lm3s6965/hardware.run}} ``` diff --git a/book/en/src/by-example/message_passing.md b/book/en/src/by-example/message_passing.md index 02fd298fac..ac16097160 100644 --- a/book/en/src/by-example/message_passing.md +++ b/book/en/src/by-example/message_passing.md @@ -15,6 +15,6 @@ The number of arguments to a task is not limited: ``` ``` console -$ cargo run --target thumbv7m-none-eabi --example message_passing +$ cargo xtask qemu --verbose --example message_passing {{#include ../../../../ci/expected/message_passing.run}} ``` diff --git a/book/en/src/by-example/resources.md b/book/en/src/by-example/resources.md index c2472bc2c2..8c26d8ec1e 100644 --- a/book/en/src/by-example/resources.md +++ b/book/en/src/by-example/resources.md @@ -9,7 +9,7 @@ Declaration of system-wide resources is done by annotating **two** `struct`s wit Each task must declare the resources it intends to access in its corresponding metadata attribute using the `local` and `shared` arguments. Each argument takes a list of resource identifiers. The listed resources are made available to the context under the `local` and `shared` fields of the `Context` structure. The `init` task returns the initial values for the system-wide (`#[shared]` and `#[local]`) resources. 
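Putting these rules together, a minimal sketch of the declarations involved (the resource names, initial values and the `UART0` binding are illustrative; the complete runnable examples follow below):

```rust,noplayground
#[shared]
struct Shared {
    counter: u32,
}

#[local]
struct Local {
    led_on: bool,
}

#[init]
fn init(_: init::Context) -> (Shared, Local) {
    // `init` returns the initial values of all system-wide resources
    (Shared { counter: 0 }, Local { led_on: false })
}

#[task(binds = UART0, shared = [counter], local = [led_on])]
fn uart0(mut cx: uart0::Context) {
    // `local` resources are exclusively owned by this task -- no lock needed
    *cx.local.led_on = !*cx.local.led_on;
    // `shared` resources are accessed inside a critical section via `lock`
    cx.shared.counter.lock(|counter| *counter += 1);
}
```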
- + @@ -25,18 +25,18 @@ Types of `#[local]` resources must implement a [`Send`] trait as they are being The example application shown below contains three tasks `foo`, `bar` and `idle`, each having access to its own `#[local]` resource. -``` rust,noplayground -{{#include ../../../../rtic/examples/locals.rs}} +```rust,noplayground +{{#include ../../../../examples/lm3s6965/examples/locals.rs}} ``` Running the example: -``` console -$ cargo run --target thumbv7m-none-eabi --example locals +```console +$ cargo xtask qemu --verbose --example locals ``` -``` console -{{#include ../../../../rtic/ci/expected/locals.run}} +```console +{{#include ../../../../ci/expected/lm3s6965/locals.run}} ``` Local resources in `#[init]` and `#[idle]` have `'static` lifetimes. This is safe since both tasks are not re-entrant. @@ -51,16 +51,17 @@ Types of `#[task(local = [..])]` resources have to be neither [`Send`] nor [`Syn In the example below the different uses and lifetimes are shown: -``` rust,noplayground -{{#include ../../../../rtic/examples/declared_locals.rs}} +```rust,noplayground +{{#include ../../../../examples/lm3s6965/examples/declared_locals.rs}} ``` You can run the application, but as the example is designed merely to showcase the lifetime properties there is no output (it suffices to build the application). -``` console +```console $ cargo build --target thumbv7m-none-eabi --example declared_locals ``` - + + ## `#[shared]` resources and `lock` @@ -69,23 +70,23 @@ Critical sections are required to access `#[shared]` resources in a data race-fr [`Mutex`]: ../../../api/rtic/trait.Mutex.html [`lock`]: ../../../api/rtic/trait.Mutex.html#method.lock -The critical section created by the `lock` API is based on dynamic priorities: it temporarily raises the dynamic priority of the context to a *ceiling* priority that prevents other tasks from preempting the critical section. This synchronization protocol is known as the [Immediate Ceiling Priority Protocol (ICPP)][icpp], and complies with [Stack Resource Policy (SRP)][srp] based scheduling of RTIC. +The critical section created by the `lock` API is based on dynamic priorities: it temporarily raises the dynamic priority of the context to a _ceiling_ priority that prevents other tasks from preempting the critical section. This synchronization protocol is known as the [Immediate Ceiling Priority Protocol (ICPP)][icpp], and complies with [Stack Resource Policy (SRP)][srp] based scheduling of RTIC. [icpp]: https://en.wikipedia.org/wiki/Priority_ceiling_protocol [srp]: https://en.wikipedia.org/wiki/Stack_Resource_Policy In the example below we have three interrupt handlers with priorities ranging from one to three. The two handlers with the lower priorities contend for a `shared` resource and need to succeed in locking the resource in order to access its data. The highest priority handler, which does not access the `shared` resource, is free to preempt a critical section created by the lowest priority handler. -``` rust,noplayground -{{#include ../../../../rtic/examples/lock.rs}} +```rust,noplayground +{{#include ../../../../examples/lm3s6965/examples/lock.rs}} ``` -``` console -$ cargo run --target thumbv7m-none-eabi --example lock +```console +$ cargo xtask qemu --verbose --example lock ``` -``` console -{{#include ../../../../rtic/ci/expected/lock.run}} +```console +{{#include ../../../../ci/expected/lm3s6965/lock.run}} ``` Types of `#[shared]` resources have to be [`Send`]. @@ -94,16 +95,16 @@ Types of `#[shared]` resources have to be [`Send`]. 
As an extension to `lock`, and to reduce rightward drift, locks can be taken as tuples. The following examples show this in use: -``` rust,noplayground -{{#include ../../../../rtic/examples/multilock.rs}} +```rust,noplayground +{{#include ../../../../examples/lm3s6965/examples/multilock.rs}} ``` -``` console -$ cargo run --target thumbv7m-none-eabi --example multilock +```console +$ cargo xtask qemu --verbose --example multilock ``` -``` console -{{#include ../../../../rtic/ci/expected/multilock.run}} +```console +{{#include ../../../../ci/expected/lm3s6965/multilock.run}} ``` ## Only shared (`&-`) access @@ -112,44 +113,44 @@ By default, the framework assumes that all tasks require exclusive mutable acces The advantage of specifying shared access (`&-`) to a resource is that no locks are required to access the resource even if the resource is contended by more than one task running at different priorities. The downside is that the task only gets a shared reference (`&-`) to the resource, limiting the operations it can perform on it, but where a shared reference is enough this approach reduces the number of required locks. In addition to simple immutable data, this shared access can be useful where the resource type safely implements interior mutability, with appropriate locking or atomic operations of its own. -Note that in this release of RTIC it is not possible to request both exclusive access (`&mut-`) and shared access (`&-`) to the *same* resource from different tasks. Attempting to do so will result in a compile error. +Note that in this release of RTIC it is not possible to request both exclusive access (`&mut-`) and shared access (`&-`) to the _same_ resource from different tasks. Attempting to do so will result in a compile error. In the example below a key (e.g. a cryptographic key) is loaded (or created) at runtime (returned by `init`) and then used from two tasks that run at different priorities without any kind of lock. -``` rust,noplayground -{{#include ../../../../rtic/examples/only-shared-access.rs}} +```rust,noplayground +{{#include ../../../../examples/lm3s6965/examples/only-shared-access.rs}} ``` -``` console -$ cargo run --target thumbv7m-none-eabi --example only-shared-access +```console +$ cargo xtask qemu --verbose --example only-shared-access ``` -``` console -{{#include ../../../../rtic/ci/expected/only-shared-access.run}} +```console +{{#include ../../../../ci/expected/lm3s6965/only-shared-access.run}} ``` ## Lock-free access of shared resources -A critical section is *not* required to access a `#[shared]` resource that's only accessed by tasks running at the *same* priority. In this case, you can opt out of the `lock` API by adding the `#[lock_free]` field-level attribute to the resource declaration (see example below). +A critical section is _not_ required to access a `#[shared]` resource that's only accessed by tasks running at the _same_ priority. In this case, you can opt out of the `lock` API by adding the `#[lock_free]` field-level attribute to the resource declaration (see example below). -To adhere to the Rust [aliasing] rule, a resource may be either accessed through multiple immutable references or a singe mutable reference (but not both at the same time). +To adhere to the Rust [aliasing] rule, a resource may be either accessed through multiple immutable references or a singe mutable reference (but not both at the same time). 
[aliasing]: https://doc.rust-lang.org/nomicon/aliasing.html -Using `#[lock_free]` on resources shared by tasks running at different priorities will result in a *compile-time* error -- not using the `lock` API would violate the aforementioned alias rule. Similarly, for each priority there can be only a single *software* task accessing a shared resource (as an `async` task may yield execution to other *software* or *hardware* tasks running at the same priority). However, under this single-task restriction, we make the observation that the resource is in effect no longer `shared` but rather `local`. Thus, using a `#[lock_free]` shared resource will result in a *compile-time* error -- where applicable, use a `#[local]` resource instead. +Using `#[lock_free]` on resources shared by tasks running at different priorities will result in a _compile-time_ error -- not using the `lock` API would violate the aforementioned alias rule. Similarly, for each priority there can be only a single _software_ task accessing a shared resource (as an `async` task may yield execution to other _software_ or _hardware_ tasks running at the same priority). However, under this single-task restriction, we make the observation that the resource is in effect no longer `shared` but rather `local`. Thus, using a `#[lock_free]` shared resource will result in a _compile-time_ error -- where applicable, use a `#[local]` resource instead. -``` rust,noplayground -{{#include ../../../../rtic/examples/lock-free.rs}} +```rust,noplayground +{{#include ../../../../examples/lm3s6965/examples/lock-free.rs}} ``` -``` console -$ cargo run --target thumbv7m-none-eabi --example lock-free +```console +$ cargo xtask qemu --verbose --example lock-free ``` -``` console -{{#include ../../../../rtic/ci/expected/lock-free.run}} +```console +{{#include ../../../../ci/expected/lm3s6965/lock-free.run}} ``` diff --git a/book/en/src/by-example/software_tasks.md b/book/en/src/by-example/software_tasks.md index 756150d7f0..5d5ef71b8d 100644 --- a/book/en/src/by-example/software_tasks.md +++ b/book/en/src/by-example/software_tasks.md @@ -2,109 +2,110 @@ The RTIC concept of a software task shares a lot with that of [hardware tasks](./hardware_tasks.md). The core difference is that a software task is not explicitly bound to a specific interrupt vector, but rather bound to a β€œdispatcher” interrupt vector running at the intended priority of the software task (see below). -Similarly to *hardware* tasks, the `#[task]` attribute used on a function declare it as a task. The absence of a `binds = InterruptName` argument to the attribute declares the function as a *software task*. +Similarly to _hardware_ tasks, the `#[task]` attribute used on a function declare it as a task. The absence of a `binds = InterruptName` argument to the attribute declares the function as a _software task_. The static method `task_name::spawn()` spawns (starts) a software task and given that there are no higher priority tasks running the task will start executing directly. -The *software* task itself is given as an `async` Rust function, which allows the user to optionally `await` future events. This allows to blend reactive programming (by means of *hardware* tasks) with sequential programming (by means of *software* tasks). +The _software_ task itself is given as an `async` Rust function, which allows the user to optionally `await` future events. This allows to blend reactive programming (by means of _hardware_ tasks) with sequential programming (by means of _software_ tasks). 
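A minimal sketch of such a task (the task name, its priority, and the use of a monotonic `Systick` delay from `rtic-monotonics` are illustrative assumptions):

```rust,noplayground
#[task(priority = 1)]
async fn blink(_cx: blink::Context) {
    loop {
        // ... do some work ...

        // A forever-running task must yield at least once per loop,
        // here by awaiting a monotonic delay.
        Systick::delay(100.millis()).await;
    }
}

// elsewhere, e.g. in `init` or a hardware task:
// blink::spawn().ok();
```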
-While *hardware* tasks are assumed to run-to-completion (and return), *software* tasks may be started (`spawned`) once and run forever, on the condition that any loop (execution path) is broken by at least one `await` (yielding operation). +While _hardware_ tasks are assumed to run-to-completion (and return), _software_ tasks may be started (`spawned`) once and run forever, on the condition that any loop (execution path) is broken by at least one `await` (yielding operation). ## Dispatchers -All *software* tasks at the same priority level share an interrupt handler acting as an async executor dispatching the software tasks. This list of dispatchers, `dispatchers = [FreeInterrupt1, FreeInterrupt2, ...]` is an argument to the `#[app]` attribute, where you define the set of free and usable interrupts. +All _software_ tasks at the same priority level share an interrupt handler acting as an async executor dispatching the software tasks. This list of dispatchers, `dispatchers = [FreeInterrupt1, FreeInterrupt2, ...]` is an argument to the `#[app]` attribute, where you define the set of free and usable interrupts. Each interrupt vector acting as dispatcher gets assigned to one priority level meaning that the list of dispatchers need to cover all priority levels used by software tasks. Example: The `dispatchers =` argument needs to have at least 3 entries for an application using three different priorities for software tasks. -The framework will give a compilation error if there are not enough dispatchers provided, or if a clash occurs between the list of dispatchers and interrupts bound to *hardware* tasks. +The framework will give a compilation error if there are not enough dispatchers provided, or if a clash occurs between the list of dispatchers and interrupts bound to _hardware_ tasks. See the following example: -``` rust,noplayground -{{#include ../../../../rtic/examples/spawn.rs}} +```rust,noplayground +{{#include ../../../../examples/lm3s6965/examples/spawn.rs}} ``` -``` console -$ cargo run --target thumbv7m-none-eabi --example spawn +```console +$ cargo xtask qemu --verbose --example spawn ``` -``` console -{{#include ../../../../rtic/ci/expected/spawn.run}} -``` -You may `spawn` a *software* task again, given that it has run-to-completion (returned). - -In the below example, we `spawn` the *software* task `foo` from the `idle` task. Since the priority of the *software* task is 1 (higher than `idle`), the dispatcher will execute `foo` (preempting `idle`). Since `foo` runs-to-completion. It is ok to `spawn` the `foo` task again. - -Technically the async executor will `poll` the `foo` *future* which in this case leaves the *future* in a *completed* state. - -``` rust,noplayground -{{#include ../../../../rtic/examples/spawn_loop.rs}} +```console +{{#include ../../../../ci/expected/lm3s6965/spawn.run}} ``` -``` console -$ cargo run --target thumbv7m-none-eabi --example spawn_loop +You may `spawn` a _software_ task again, given that it has run-to-completion (returned). + +In the below example, we `spawn` the _software_ task `foo` from the `idle` task. Since the priority of the _software_ task is 1 (higher than `idle`), the dispatcher will execute `foo` (preempting `idle`). Since `foo` runs-to-completion. It is ok to `spawn` the `foo` task again. + +Technically the async executor will `poll` the `foo` _future_ which in this case leaves the _future_ in a _completed_ state. 
+ +```rust,noplayground +{{#include ../../../../examples/lm3s6965/examples/spawn_loop.rs}} ``` -``` console -{{#include ../../../../rtic/ci/expected/spawn_loop.run}} +```console +$ cargo xtask qemu --verbose --example spawn_loop ``` -An attempt to `spawn` an already spawned task (running) task will result in an error. Notice, the that the error is reported before the `foo` task is actually run. This is since, the actual execution of the *software* task is handled by the dispatcher interrupt (`SSIO`), which is not enabled until we exit the `init` task. (Remember, `init` runs in a critical section, i.e. all interrupts being disabled.) - -Technically, a `spawn` to a *future* that is not in *completed* state is considered an error. - -``` rust,noplayground -{{#include ../../../../rtic/examples/spawn_err.rs}} +```console +{{#include ../../../../ci/expected/lm3s6965/spawn_loop.run}} ``` -``` console -$ cargo run --target thumbv7m-none-eabi --example spawn_err +An attempt to `spawn` an already spawned task (running) task will result in an error. Notice, the that the error is reported before the `foo` task is actually run. This is since, the actual execution of the _software_ task is handled by the dispatcher interrupt (`SSIO`), which is not enabled until we exit the `init` task. (Remember, `init` runs in a critical section, i.e. all interrupts being disabled.) + +Technically, a `spawn` to a _future_ that is not in _completed_ state is considered an error. + +```rust,noplayground +{{#include ../../../../examples/lm3s6965/examples/spawn_err.rs}} ``` -``` console -{{#include ../../../../rtic/ci/expected/spawn_err.run}} +```console +$ cargo xtask qemu --verbose --example spawn_err +``` + +```console +{{#include ../../../../ci/expected/lm3s6965/spawn_err.run}} ``` ## Passing arguments + You can also pass arguments at spawn as follows. -``` rust,noplayground -{{#include ../../../../rtic/examples/spawn_arguments.rs}} +```rust,noplayground +{{#include ../../../../examples/lm3s6965/examples/spawn_arguments.rs}} ``` -``` console -$ cargo run --target thumbv7m-none-eabi --example spawn_arguments +```console +$ cargo xtask qemu --verbose --example spawn_arguments ``` -``` console -{{#include ../../../../rtic/ci/expected/spawn_arguments.run}} +```console +{{#include ../../../../ci/expected/lm3s6965/spawn_arguments.run}} ``` ## Priority zero tasks -In RTIC tasks run preemptively to each other, with priority zero (0) the lowest priority. You can use priority zero tasks for background work, without any strict real-time requirements. +In RTIC tasks run preemptively to each other, with priority zero (0) the lowest priority. You can use priority zero tasks for background work, without any strict real-time requirements. Conceptually, one can see such tasks as running in the `main` thread of the application, thus the resources associated are not required the [Send] bound. [Send]: https://doc.rust-lang.org/nomicon/send-and-sync.html - -``` rust,noplayground -{{#include ../../../../rtic/examples/zero-prio-task.rs}} +```rust,noplayground +{{#include ../../../../examples/lm3s6965/examples/zero-prio-task.rs}} ``` -``` console -$ cargo run --target thumbv7m-none-eabi --example zero-prio-task +```console +$ cargo xtask qemu --verbose --example zero-prio-task ``` -``` console -{{#include ../../../../rtic/ci/expected/zero-prio-task.run}} +```console +{{#include ../../../../ci/expected/lm3s6965/zero-prio-task.run}} ``` -> **Notice**: *software* task at zero priority cannot co-exist with the [idle] task. 
The reason is that `idle` is running as a non-returning Rust function at priority zero. Thus there would be no way for an executor at priority zero to give control to *software* tasks at the same priority. +> **Notice**: _software_ task at zero priority cannot co-exist with the [idle] task. The reason is that `idle` is running as a non-returning Rust function at priority zero. Thus there would be no way for an executor at priority zero to give control to _software_ tasks at the same priority. --- -Application side safety: Technically, the RTIC framework ensures that `poll` is never executed on any *software* task with *completed* future, thus adhering to the soundness rules of async Rust. +Application side safety: Technically, the RTIC framework ensures that `poll` is never executed on any _software_ task with _completed_ future, thus adhering to the soundness rules of async Rust. diff --git a/book/en/src/by-example/tips/destructureing.md b/book/en/src/by-example/tips/destructureing.md index 752311d3a7..feaa74b31e 100644 --- a/book/en/src/by-example/tips/destructureing.md +++ b/book/en/src/by-example/tips/destructureing.md @@ -3,13 +3,14 @@ Destructuring task resources might help readability if a task takes multiple resources. Here are two examples on how to split up the resource struct: -``` rust,noplayground -{{#include ../../../../../rtic/examples/destructure.rs}} +```rust,noplayground +{{#include ../../../../../examples/lm3s6965/examples/destructure.rs}} ``` -``` console -$ cargo run --target thumbv7m-none-eabi --example destructure +```console +$ cargo xtask qemu --verbose --example destructure ``` -``` console -{{#include ../../../../../rtic/ci/expected/destructure.run}} + +```console +{{#include ../../../../../ci/expected/lm3s6965/destructure.run}} ``` diff --git a/book/en/src/by-example/tips/indirection.md b/book/en/src/by-example/tips/indirection.md index aa681905d9..71a198d943 100644 --- a/book/en/src/by-example/tips/indirection.md +++ b/book/en/src/by-example/tips/indirection.md @@ -13,14 +13,14 @@ As this example of approach goes completely outside of RTIC resource model with Here's an example where `heapless::Pool` is used to "box" buffers of 128 bytes. -``` rust,noplayground -{{#include ../../../../../rtic/examples/pool.rs}} +```rust,noplayground +{{#include ../../../../../examples/lm3s6965/examples/pool.rs}} ``` -``` console -$ cargo run --target thumbv7m-none-eabi --example pool +```console +$ cargo xtask qemu --verbose --example pool ``` -``` console -{{#include ../../../../../rtic/ci/expected/pool.run}} +```console +{{#include ../../../../../ci/expected/lm3s6965/pool.run}} ``` diff --git a/book/en/src/by-example/tips/static_lifetimes.md b/book/en/src/by-example/tips/static_lifetimes.md index f4e4829f7e..7f4e38f0b8 100644 --- a/book/en/src/by-example/tips/static_lifetimes.md +++ b/book/en/src/by-example/tips/static_lifetimes.md @@ -8,16 +8,16 @@ In the following example two different tasks share a [`heapless::spsc::Queue`] f [`heapless::spsc::Queue`]: https://docs.rs/heapless/0.7.5/heapless/spsc/struct.Queue.html -``` rust,noplayground -{{#include ../../../../../rtic/examples/static.rs}} +```rust,noplayground +{{#include ../../../../../examples/lm3s6965/examples/static.rs}} ``` Running this program produces the expected output. 
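The trick at the core of `static.rs` is to let the queue live in an `init`-local resource, so that splitting it yields producer and consumer halves borrowing it for `'static`, which can then be handed to two different tasks. A sketch of that pattern, with task names, capacity, and scaffolding assumed rather than taken from the real example:

```rust,noplayground
#![no_main]
#![no_std]

use panic_semihosting as _;

#[rtic::app(device = lm3s6965, dispatchers = [SSI0])]
mod app {
    use cortex_m_semihosting::{debug, hprintln};
    use heapless::spsc::{Consumer, Producer, Queue};

    #[shared]
    struct Shared {}

    #[local]
    struct Local {
        producer: Producer<'static, u32, 5>,
        consumer: Consumer<'static, u32, 5>,
    }

    // The queue lives in an `init`-local resource, so the two halves
    // returned by `split()` borrow it for the full `'static` lifetime.
    #[init(local = [queue: Queue<u32, 5> = Queue::new()])]
    fn init(cx: init::Context) -> (Shared, Local) {
        let (producer, consumer) = cx.local.queue.split();
        sender::spawn().unwrap();
        receiver::spawn().unwrap();
        (Shared {}, Local { producer, consumer })
    }

    #[task(local = [producer])]
    async fn sender(cx: sender::Context) {
        cx.local.producer.enqueue(42).unwrap();
    }

    #[task(local = [consumer])]
    async fn receiver(cx: receiver::Context) {
        // In this sketch `sender` was spawned first, so a value is usually
        // already queued; checking with `if let` keeps it robust either way.
        if let Some(value) = cx.local.consumer.dequeue() {
            hprintln!("received {}", value);
        }
        debug::exit(debug::EXIT_SUCCESS); // Exit QEMU simulator
    }
}
```

Because `#[init(local = ...)]` resources are `'static`, no global `static mut` or unsafe code is needed to obtain the `'static` endpoints.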
-``` console -$ cargo run --target thumbv7m-none-eabi --example static +```console +$ cargo xtask qemu --verbose --example static ``` -``` console -{{#include ../../../../../rtic/ci/expected/static.run}} +```console +{{#include ../../../../../ci/expected/lm3s6965/static.run}} ``` diff --git a/examples/lm3s6965/Cargo.lock b/examples/lm3s6965/Cargo.lock index 696c606ea2..076a156ea8 100644 --- a/examples/lm3s6965/Cargo.lock +++ b/examples/lm3s6965/Cargo.lock @@ -354,7 +354,7 @@ dependencies = [ [[package]] name = "rtic" -version = "2.1.0" +version = "2.1.1" dependencies = [ "atomic-polyfill", "bare-metal 1.0.0", @@ -429,6 +429,7 @@ name = "rtic_lm3s6965" version = "0.1.0" dependencies = [ "bare-metal 1.0.0", + "cfg-if", "cortex-m", "cortex-m-semihosting", "futures", diff --git a/examples/lm3s6965/Cargo.toml b/examples/lm3s6965/Cargo.toml index 86a7cbbb66..bfa4b29cbe 100644 --- a/examples/lm3s6965/Cargo.toml +++ b/examples/lm3s6965/Cargo.toml @@ -18,6 +18,7 @@ rtic-time = { path = "../../rtic-time" } rtic-sync = { path = "../../rtic-sync" } rtic-monotonics = { path = "../../rtic-monotonics", features = ["cortex-m-systick"] } rtic = { path = "../../rtic" } +cfg-if = "1.0" [dependencies.futures] version = "0.3.26" diff --git a/examples/lm3s6965/examples/pool.rs b/examples/lm3s6965/examples/pool.rs new file mode 100644 index 0000000000..ba04e62f59 --- /dev/null +++ b/examples/lm3s6965/examples/pool.rs @@ -0,0 +1,102 @@ +//! examples/pool.rs + +#![no_main] +#![no_std] +#![deny(warnings)] + +use panic_semihosting as _; +use rtic::app; + +// thumbv6-none-eabi does not support pool +// This might be better worked around in the build system, +// but for proof of concept, let's try having one example +// being different for different backends +// https://docs.rs/heapless/0.8.0/heapless/pool/index.html#target-support +cfg_if::cfg_if! { + if #[cfg(feature = "thumbv6-backend")] { + // Copy of the smallest.rs example + #[app(device = lm3s6965)] + mod app { + use cortex_m_semihosting::debug; + + #[shared] + struct Shared {} + + #[local] + struct Local {} + + #[init] + fn init(_: init::Context) -> (Shared, Local) { + debug::exit(debug::EXIT_SUCCESS); // Exit QEMU simulator + (Shared {}, Local {}) + } + } + } else { + // Run actual pool example + use heapless::{ + box_pool, + pool::boxed::{Box, BoxBlock}, + }; + + // Declare a pool containing 8-byte memory blocks + box_pool!(P: u8); + + const POOL_CAPACITY: usize = 512; + + #[app(device = lm3s6965, dispatchers = [SSI0, QEI0])] + mod app { + use crate::{Box, BoxBlock, POOL_CAPACITY}; + use cortex_m_semihosting::debug; + use lm3s6965::Interrupt; + + // Import the memory pool into scope + use crate::P; + + #[shared] + struct Shared {} + + #[local] + struct Local {} + + const BLOCK: BoxBlock = BoxBlock::new(); + + #[init(local = [memory: [BoxBlock; POOL_CAPACITY] = [BLOCK; POOL_CAPACITY]])] + fn init(cx: init::Context) -> (Shared, Local) { + for block in cx.local.memory { + // Give the 'static memory to the pool + P.manage(block); + } + + rtic::pend(Interrupt::I2C0); + + (Shared {}, Local {}) + } + + #[task(binds = I2C0, priority = 2)] + fn i2c0(_: i2c0::Context) { + // Claim 128 u8 blocks + let x = P.alloc(128).unwrap(); + + // .. send it to the `foo` task + foo::spawn(x).ok().unwrap(); + + // send another 128 u8 blocks to the task `bar` + bar::spawn(P.alloc(128).unwrap()).ok().unwrap(); + } + + #[task] + async fn foo(_: foo::Context, _x: Box
<P>
) { + // explicitly return the block to the pool + drop(_x); + + debug::exit(debug::EXIT_SUCCESS); // Exit QEMU simulator + } + + #[task(priority = 2)] + async fn bar(_: bar::Context, _x: Box
<P>
) { + // this is done automatically so we can omit the call to `drop` + // drop(_x); + } + } + } +} diff --git a/examples/lm3s6965/examples/pool.rs_old b/examples/lm3s6965/examples/pool.rs_old deleted file mode 100644 index b399202b51..0000000000 --- a/examples/lm3s6965/examples/pool.rs_old +++ /dev/null @@ -1,69 +0,0 @@ -//! examples/pool.rs - -#![no_main] -#![no_std] -#![deny(warnings)] - -use heapless::{ - pool, - pool::singleton::{Box, Pool}, -}; -use panic_semihosting as _; -use rtic::app; - -// Declare a pool of 128-byte memory blocks -pool!(P: [u8; 128]); - -#[app(device = lm3s6965, dispatchers = [SSI0, QEI0])] -mod app { - use crate::{Box, Pool}; - use cortex_m_semihosting::debug; - use lm3s6965::Interrupt; - - // Import the memory pool into scope - use super::P; - - #[shared] - struct Shared {} - - #[local] - struct Local {} - - #[init(local = [memory: [u8; 512] = [0; 512]])] - fn init(cx: init::Context) -> (Shared, Local) { - // Increase the capacity of the memory pool by ~4 - P::grow(cx.local.memory); - - rtic::pend(Interrupt::I2C0); - - (Shared {}, Local {}) - } - - #[task(binds = I2C0, priority = 2)] - fn i2c0(_: i2c0::Context) { - // claim a memory block, initialize it and .. - let x = P::alloc().unwrap().init([0u8; 128]); - - // .. send it to the `foo` task - foo::spawn(x).ok().unwrap(); - - // send another block to the task `bar` - bar::spawn(P::alloc().unwrap().init([0u8; 128])) - .ok() - .unwrap(); - } - - #[task] - async fn foo(_: foo::Context, _x: Box
<P>
) { - // explicitly return the block to the pool - drop(_x); - - debug::exit(debug::EXIT_SUCCESS); // Exit QEMU simulator - } - - #[task(priority = 2)] - async fn bar(_: bar::Context, _x: Box
<P>
) { - // this is done automatically so we can omit the call to `drop` - // drop(_x); - } -}
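For readers unfamiliar with the heapless 0.8 boxed-pool API that the new `pool.rs` relies on, it can also be exercised outside RTIC. A minimal host-side sketch (this assumes a target with the atomic CAS support that `heapless::pool` requires; the values are illustrative):

```rust,noplayground
use heapless::{box_pool, pool::boxed::BoxBlock};

// A pool of u8 values, mirroring the `box_pool!(P: u8)` declaration in pool.rs.
box_pool!(P: u8);

fn main() {
    // Hand the pool one block of 'static memory to manage.
    let block: &'static mut BoxBlock<u8> = Box::leak(Box::new(BoxBlock::new()));
    P.manage(block);

    // Claim the block, use it, and return it to the pool by dropping it.
    let boxed = P.alloc(128).unwrap();
    assert_eq!(*boxed, 128);
    drop(boxed);

    // The block is available again after the previous Box was dropped.
    assert!(P.alloc(7).is_ok());
}
```

Dropping the `Box` hands the block back to the pool, which is why the RTIC example can simply let `_x` go out of scope (or `drop` it explicitly) in `foo` and `bar`.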